content stringlengths 5 1.05M |
|---|
from __future__ import print_function
import re
from copy import deepcopy
from itertools import product
from builtins import map
from builtins import object
from builtins import range
from builtins import zip
class Cut(object):
    """A single selection expression parsed from a configuration string.

    Recognised markers in the raw string:
      * ``selection:::name``  - attaches a human-readable name
      * ``DATA:`` / ``MC:``   - restricts the cut to data or simulation
      * ``TYPE_<NAME>:``      - restricts the cut to a single process type

    The original, unstripped string is kept as ``name`` when no explicit
    name was provided.
    """

    def __init__(self, selection):
        self.is_data = False
        self.is_mc = False
        self.process_type = None
        if ':::' in selection:
            # explicit name given after the ':::' separator
            self.selection, self.name = selection.split(':::')
        else:
            self.name = selection
            self.selection = selection
        if 'DATA:' in self.selection:
            self.is_data = True
            self.selection = self.selection.replace('DATA:', '')
        type_match = re.match(r'TYPE_[A-Z].*:', self.selection)
        if 'MC:' in self.selection:
            self.is_mc = True
            self.selection = self.selection.replace('MC:', '')
        elif type_match:
            prefix = type_match.group(0)
            self.selection = self.selection.replace(prefix, '')
            self.process_type = prefix.replace('TYPE_', '').rstrip(':').lower()

    def __eq__(self, other):
        """
        Comparison operator: two cuts are equal when all attributes match.
        :param other: Cut object to compare to
        :type other: Cut
        :return: True/False
        :rtype: boolean
        """
        if not isinstance(self, other.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __str__(self):
        """
        Human-readable description used by print().
        :return: formatted string with name and selection
        :rtype: str
        """
        return "Cut object named {:s} and selection {:s}".format(self.name, self.selection)

    def __repr__(self):
        """
        Representation used when containers of cuts are printed.
        :return: formatted string with name and selection
        :rtype: str
        """
        return '{}\n'.format(self)
class Region(object):
    """
    Definition of an analysis region: lepton multiplicities (electrons,
    muons, taus), object-quality and event selections, plus labelling and
    normalisation metadata used for plotting and HistFactory setups.
    """
    def __init__(self, **kwargs):
        """
        Constructor. Every keyword argument is also attached to the
        instance as a lower-cased attribute.
        :param kwargs: region configuration; 'name' is required
        :type kwargs: dict
        """
        kwargs.setdefault("n_lep", -1)
        kwargs.setdefault("n_electron", -1)
        kwargs.setdefault("n_muon", -1)
        # limit specific settings to help HistFactory setup
        kwargs.setdefault("norm_region", False)
        kwargs.setdefault("val_region", False)
        kwargs.setdefault("channel", None)
        kwargs.setdefault("label", None)
        kwargs.setdefault("tex_label", None)
        kwargs.setdefault("norm_backgrounds", {})
        self.name = kwargs["name"]
        self.n_lep = kwargs["n_lep"]
        self.n_electron = kwargs["n_electron"]
        self.n_muon = kwargs["n_muon"]
        try:
            self.n_tau = kwargs["n_tau"]
        except KeyError:
            self.n_tau = 0
        kwargs.setdefault("disable_leptons", False)
        kwargs.setdefault("disable_taus", False)
        kwargs.setdefault("disable_electrons", False)
        kwargs.setdefault("disable_muons", False)
        kwargs.setdefault("is_on_z", None)
        kwargs.setdefault("operator", "eq")
        kwargs.setdefault("muon_operator", "eq")
        kwargs.setdefault("electron_operator", "eq")
        kwargs.setdefault('label_position', None)
        kwargs.setdefault("good_muon", None)
        kwargs.setdefault("fake_muon", None)
        kwargs.setdefault("inverted_muon", None)
        kwargs.setdefault("good_electron", None)
        kwargs.setdefault("inverted_electron", None)
        kwargs.setdefault("event_cuts", None)
        kwargs.setdefault("split_mc_data", False)
        kwargs.setdefault("common_selection", None)
        kwargs.setdefault("weight", None)
        kwargs.setdefault("binning", None)
        for k, v in list(kwargs.items()):
            setattr(self, k.lower(), v)
        if self.label is None:
            self.build_label()
        self.cut_list = []
        # replace the textual operator attributes set above with their
        # symbolic equivalents (e.g. 'eq' -> '==')
        self.parse_operators('operator', kwargs)
        self.parse_operators('muon_operator', kwargs)
        self.parse_operators('electron_operator', kwargs)
        self.build_cuts()

    def parse_operators(self, name, kwargs):
        """
        Translate the textual comparison operator stored in kwargs[name]
        into its symbol and store it as an instance attribute.
        :param name: attribute name, e.g. 'muon_operator'
        :type name: str
        :param kwargs: constructor keyword arguments
        :type kwargs: dict
        :raises ValueError: if an unsupported operator string is given
        """
        kwargs.setdefault(name, 'eq')
        symbols = {'eq': '==', 'geq': '>=', 'leq': '<='}
        try:
            setattr(self, name, symbols[kwargs[name]])
        except KeyError:
            raise ValueError("Invalid operator provided for {:s}. "
                             "Currently supported: eq(==), geq(>=) and leq(<=)".format(name))

    def __eq__(self, other):
        """
        Comparison operator
        :param other: Region object to compare to
        :type other: Region
        :return: True/False
        :rtype: boolean
        """
        if isinstance(self, other.__class__):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other):
        """Inverse of __eq__ (defined explicitly for Python 2 semantics)."""
        return not self.__eq__(other)

    def __hash__(self):
        # NOTE: hashing uses only the name while __eq__ compares all
        # attributes - equal hashes therefore do not imply equality.
        return hash(self.name)

    def __str__(self):
        """
        Overloaded str operator. Gets called if object is printed
        :return: formatted string with name and attributes
        :rtype: str
        """
        obj_str = "Region: {:s} \n".format(self.name)
        for attribute, value in list(self.__dict__.items()):
            if attribute == 'name':
                continue
            obj_str += '{}={} '.format(attribute, value)
        return obj_str

    def __repr__(self):
        """
        Overloads representation operator. Gets called e.g. if list of objects are printed
        :return: formatted string with name and attributes
        :rtype: str
        """
        return self.__str__() + '\n'

    def build_cuts(self):
        """
        Assemble the full cut list: event cuts, lepton multiplicity cuts
        (unless disabled) and any common post-selection cuts.
        :return: None
        :rtype: None
        """
        self.cut_list = self.build_cut_list(self.event_cuts, 'event_cuts')
        if not self.disable_leptons:
            self.convert_lepton_selections()
        self.cut_list += self.build_cut_list(None, 'post_sel_cuts')

    def build_cut_list(self, cut_list, selection=None):
        """
        Convert selection strings into Cut objects, prepending any strings
        registered under `selection` in the common selection config.
        :param cut_list: selection strings (or None)
        :type cut_list: list
        :param selection: key into the common_selection dictionary
        :type selection: str
        :return: list of Cut objects
        :rtype: list
        """
        tmp_cut_list = []
        if self.common_selection is not None and selection is not None:
            if selection in self.common_selection:
                tmp_cut_list += [Cut(sel) for sel in self.common_selection[selection]]
        if cut_list is not None:
            tmp_cut_list += [Cut(c) for c in cut_list]
        return tmp_cut_list

    def build_particle_cut(self, cut_list, selection, operator, particle, count):
        """
        Build a combined particle-quality + multiplicity cut.
        :param cut_list: particle quality selection strings (or None)
        :type cut_list: list
        :param selection: key into the common_selection dictionary
        :type selection: str
        :param operator: comparison operator symbol, e.g. '=='
        :type operator: str
        :param particle: name of the multiplicity branch, e.g. 'muon_n'
        :type particle: str
        :param count: required particle multiplicity
        :type count: int
        :return: combined Cut object
        :rtype: Cut
        """
        # Work on a copy: the previous implementation extended the passed
        # list in place via '+=', silently mutating e.g. self.good_muon.
        cut_list = list(cut_list) if cut_list is not None else []
        if self.common_selection is not None and selection is not None:
            if selection in self.common_selection:
                cut_list = cut_list + self.common_selection[selection]
        if not cut_list:
            return Cut('{:s} {:s} {:d}'.format(particle, operator, count))
        return Cut('Sum$({:s}) == {:s} && {:s} {:s} {:d}'.format('&& '.join(cut_list), particle,
                                                                 particle, operator, count))

    def get_cut_list(self, is_data=False):
        """
        Retrieve cut list for region. Replace data/MC-only selections according to is_data flag
        :param is_data: flag if cut list should be retrieved for data or MC
        :type is_data: boolean
        :return: cut list
        :rtype: list
        """
        def validate_cut(cut):
            # Cuts restricted to the wrong sample type are neutralised by
            # replacing their selection with the always-true string '1'.
            if cut.is_data and not is_data:
                cut = deepcopy(cut)
                cut.selection = '1'
            if cut.is_mc and is_data:
                cut = deepcopy(cut)
                cut.selection = '1'
            return cut
        return [validate_cut(c) for c in self.cut_list]

    def convert_lepton_selections(self):
        """
        build lepton selection depending on available definitions for good (signal-like) and bad (background side-band)
        lepton definitions
        :return: None
        :rtype: None
        """
        # parentheses added for clarity only; identical to Python's
        # default 'or'/'and' precedence
        if self.good_muon or (self.common_selection and "good_muon" in self.common_selection):
            self.cut_list.append(self.build_particle_cut(self.good_muon, "good_muon", self.muon_operator,
                                                         'muon_n', self.n_muon))
        if self.good_electron or (self.common_selection and "good_electron" in self.common_selection):
            self.cut_list.append(self.build_particle_cut(self.good_electron, "good_electron", self.electron_operator,
                                                         'electron_n', self.n_electron))
        if self.fake_muon:
            # NOTE(review): convert_cut_list_to_string is not defined on this
            # class; reaching this branch raises AttributeError - confirm the
            # intended helper before relying on fake_muon regions.
            self.inverted_muon_cut_string = self.convert_cut_list_to_string(self.inverted_muon)

    def build_label(self):
        """
        Constructs optional label for region from the number of electrons,
        muons and taus, optionally suffixed with the on/off-Z state.
        :return: None
        :rtype: None
        """
        self.label = "".join([a * b for a, b in zip(["e^{#pm}", "#mu^{#pm}", "#tau^{#pm}"],
                                                    [self.n_electron, self.n_muon, self.n_tau])])
        if self.is_on_z is not None:
            self.label += " on-Z" if self.is_on_z else " off-Z"

    def convert2cut_string(self):
        """
        Concatenate all cut selections into a single '&&'-joined string.
        :return: combined selection string ('' if no cuts)
        :rtype: str
        """
        if len(self.cut_list) == 0:
            return ''
        return '&&'.join([c.selection for c in self.cut_list])
class RegionBuilder(object):
    """
    Factory that instantiates Region objects either from an explicit
    'regions' configuration dictionary or by auto-generating all lepton
    multiplicity combinations, and applies them to plot configurations.
    """
    def __init__(self, **kwargs):
        """
        contructor
        :param kwargs: see below
        :type kwargs: dict
        :Keyword Arguments:
        * *auto_generate* (bool): enable automatic generation of regions based on possible combinations.
        """
        self.regions = []
        kwargs.setdefault("auto_generate", False)
        kwargs.setdefault("disable_taus", False)
        kwargs.setdefault("split_z_mass", False)
        kwargs.setdefault("same_flavour_only", False)
        kwargs.setdefault("modify_mc_data_split", False)
        kwargs.setdefault("common_selection", None)
        if kwargs["auto_generate"]:
            self.auto_generate_region(**kwargs)
        if "regions" in kwargs:
            for region_name, region_def in list(kwargs["regions"].items()):
                if kwargs["modify_mc_data_split"]:
                    region_def["split_mc_data"] = False
                self.regions.append(Region(name=region_name,
                                           common_selection=kwargs["common_selection"],
                                           **region_def))
        # tag used by the plotting framework to identify plot-config modifiers
        self.type = "PCModifier"

    def __str__(self):
        """
        Overloaded str operator. Gets called if object is printed
        :return: formatted string for all regions
        :rtype: str
        """
        # Bug fix: this method used to print() the region list and return
        # None, which made str(builder) raise a TypeError.
        return '\n'.join(str(region) for region in self.regions)

    def auto_generate_region(self, **kwargs):
        """
        Create one Region per (n_electron, n_muon, n_tau) combination whose
        sum equals kwargs['nleptons'], honouring the tau/flavour/Z-mass flags.
        :param kwargs: builder configuration; requires 'nleptons'
        :type kwargs: dict
        :return: None
        :rtype: None
        """
        n_leptons = kwargs["nleptons"]
        for digits in product("".join(map(str, list(range(n_leptons + 1)))), repeat=3):
            comb = list(map(int, digits))
            if sum(comb) == n_leptons:
                # same-flavour regions have exactly two zero multiplicities
                if kwargs["same_flavour_only"] and not comb.count(0) == 2:
                    continue
                name = "".join([a * b for a, b in zip(["e", "m", "t"], comb)])
                if kwargs["disable_taus"] and comb[2] > 0:
                    continue
                if kwargs["split_z_mass"]:
                    self.regions.append(Region(name=name + "_onZ", n_lep=n_leptons, n_electron=comb[0], n_muon=comb[1],
                                               n_tau=comb[2], is_on_z=True, **kwargs))
                    self.regions.append(Region(name=name + "_offZ", n_lep=n_leptons, n_electron=comb[0], n_muon=comb[1],
                                               n_tau=comb[2], is_on_z=False, **kwargs))
                else:
                    self.regions.append(Region(name=name, n_lep=n_leptons, n_electron=comb[0], n_muon=comb[1],
                                               n_tau=comb[2], **kwargs))

    def modify_plot_configs(self, plot_configs):
        """
        Clone every plot config once per region and attach the region's
        cuts, weight, name and label to the clone.
        :param plot_configs: plot configurations to expand
        :type plot_configs: list
        :return: expanded list of plot configurations
        :rtype: list
        """
        tmp = []
        for region in self.regions:
            for pc in plot_configs:
                region_pc = deepcopy(pc)
                cuts = [c.selection for c in region.get_cut_list()]
                if region_pc.cuts is None:
                    region_pc.cuts = cuts
                else:
                    region_pc.cuts += cuts
                if region.weight:
                    # append the region weight unless the config carries
                    # no usable weight of its own
                    if region_pc.weight is not None and not region_pc.weight.lower() == "none":
                        region_pc.weight += " * {:s}".format(region.weight)
                    else:
                        region_pc.weight = region.weight
                region_pc.name = "{:s}_{:s}".format(region.name, pc.name)
                region_pc.decor_text = region.label
                region_pc.region = region
                tmp.append(region_pc)
        return tmp

    def execute(self, plot_configs):
        """Entry point used by the plotting framework; see modify_plot_configs."""
        return self.modify_plot_configs(plot_configs)
|
'''
This tool will generate source insight 4 project file list
from build output file (*.dep),then we can import the
file list in the source insight 4 project.
'''
import os
import sys
import xml.etree.ElementTree as ET
from datetime import datetime
import re
# Step 1: locate the .dep file produced by the last build of the
# project found in the current working directory.
projectfilename = ''
sourcefile = ''
outputfile = ''
for entry in os.scandir():
    if entry.is_file():
        # IAR (.eww) support is currently disabled ('if False'); only the
        # Keil (.uvprojx) branch below is active.
        #if entry.name.endswith('.eww'):
        if False:
            projectfilename = entry.name
            # find current target
            wsdtfile = os.path.join(os.getcwd(), 'settings')
            wsdtfile = os.path.join(wsdtfile, entry.name.replace('.eww', '.wsdt'))
            if os.path.exists(wsdtfile):
                tree = ET.ElementTree(file=wsdtfile)
                ConfigDictionary = tree.find('ConfigDictionary')
                CurrentConfigs = ConfigDictionary.find('CurrentConfigs')
                TargetName = CurrentConfigs.find('Project').text.split('/')[1]
                depfilename = CurrentConfigs.find('Project').text.split('/')[0] + '.dep'
                if os.path.exists(depfilename):
                    sourcefile = depfilename
                    outputfile = os.path.splitext(projectfilename)[0]
                    break
            print('Please build the project once1')
            input()
            sys.exit(0)
        elif entry.name.endswith('.uvprojx'):
            projectfilename = entry.name
            # if entry.name.endswith('.uvproj'):
            # uvoptfile = entry.name.replace('.uvproj', '.uvopt')
            # elif entry.name.endswith('.uvprojx'):
            # uvoptfile = entry.name.replace('.uvprojx', '.uvoptx')
            uvoptfile = entry.name.replace('.uvprojx', '.uvoptx')
            tree = ET.ElementTree(file=uvoptfile)
            # find current target
            for tag in tree.findall('Target'):
                TargetOption = tag.find('TargetOption')
                OPTFL = TargetOption.find('OPTFL')
                IsCurrentTarget = int(OPTFL.find('IsCurrentTarget').text)
                if IsCurrentTarget:
                    TargetName = tag.find('TargetName').text
                    break
            # find dep file of current target
            Extensions = tree.find('Extensions')
            print(Extensions.findtext('nMigrate'))
            # Keil 4 detection is disabled ('if False'); the Keil 5 branch
            # in the else below is always taken.
            #if None == Extensions.findtext('nMigrate'):
            if False:
                # ide is keil4
                depfilename = os.path.splitext(projectfilename)[0] + '_' + TargetName + '.dep'
                print("mark2"+depfilename)
                print("projectfilename "+projectfilename)
                if os.path.exists(depfilename):
                    sourcefile = depfilename
                    outputfile = os.path.splitext(projectfilename)[0]
                else:
                    #print(os.path.exists(depfilename))
                    print(os.path)
                    print(projectfilename)
            else:
                # ide is keil5
                # NOTE(review): the 'mark'/'here' prints below look like
                # debugging leftovers.
                tree = ET.ElementTree(file=entry.name)
                print("here1")
                for tag in tree.find('Targets').findall('Target'):
                    print("here2")
                    if tag.find('TargetName').text == TargetName:
                        print("here3")
                        TargetOption = tag.find('TargetOption')
                        TargetCommonOption = TargetOption.find('TargetCommonOption')
                        OutputDirectory = TargetCommonOption.find('OutputDirectory').text
                        OutputDirectory = os.path.normpath(os.path.join(os.getcwd(), OutputDirectory))
                        depfilename = os.path.splitext(projectfilename)[0] + '_' + TargetName + '.dep'
                        depfilename = os.path.join(OutputDirectory, depfilename)
                        print("mark1"+depfilename)
                        if os.path.exists(depfilename):
                            print("here4")
                            sourcefile = depfilename
                            outputfile = os.path.splitext(projectfilename)[0]
                            print("os.path.exists ")
                            break
                        else:
                            print(os.path.exists(depfilename))
                        if '' == sourcefile:
                            print(depfilename)
                            #print(os.path)
                            print('Please build the project once2')
                            input()
                            sys.exit(0)
                        break
if '' == projectfilename:
    print('Can not find project file, enter any key to exit')
    input()
    sys.exit(0)
# Step 2: parse the selected dep file and collect the project's source
# file paths.
parsefile = open(sourcefile, 'r')
si4filelist = []
if projectfilename.endswith('.eww'):
    # IAR dep files are XML: pick the <outputs> of the active configuration.
    tree = ET.ElementTree(file=parsefile)
    for tag in tree.findall('configuration'):
        if TargetName == tag.find('name').text:
            output_tag = tag.find('outputs')
            for elem in output_tag.findall('file'):
                if elem.text.startswith('$PROJ_DIR$'):
                    if elem.text.endswith('.c') or elem.text.endswith('.s') or elem.text.endswith('.h'):
                        si4filelist.append(os.path.abspath(elem.text.replace('$PROJ_DIR$', os.getcwd()))+'\n')
            break
elif projectfilename.endswith('.uvproj') or projectfilename.endswith('.uvprojx'):
    # Keil dep files are text: 'F (path)(0x........)' lines are source
    # files, 'I (path)(0x........)' lines are included headers.
    for line in parsefile.readlines():
        m = re.search(r"^F \(.*?\)(?=\([\dxa-fA-F]{10}\))|^I \(.*?\)(?=\([\dxa-fA-F]{10}\))", line)
        if None != m:
            relpath = m.group(0)[3:-1]
            si4filelist.append(os.path.abspath(relpath)+'\n')
# De-duplicate the collected paths (iteration order is not preserved).
si4filelist = set(si4filelist)
# Step 3: write the file list with a Source Insight style header.
outputfile = open(outputfile + '.si4project_filelist.txt', 'w')
outputfile.write('; Source Insight Project File List\n')
outputfile.write('; Project Name: '+os.path.splitext(sourcefile)[0]+'\n')
outputfile.write('; Generated by si4project_filelist.py at '+datetime.now().strftime('%Y/%m/%d %H:%M:%S')+'\n')
outputfile.write('; Version=4.00.xxxx\n')
outputfile.write(';\n')
outputfile.write('; Each line should contain either a file name, a wildcard, or a sub-directory name.\n')
outputfile.write('; File paths are relative to the project source root directory.\n')
outputfile.write(';\n')
outputfile.writelines(si4filelist)
outputfile.close()
|
import os
import sys
# Put this file's directory first on sys.path so sibling modules can be
# imported regardless of the caller's working directory.
# NOTE(review): os.path.dirname(__file__) is '' when the script is run
# from its own directory; '' means "current directory" on sys.path, which
# is the same place in that case - confirm if imported from elsewhere.
sys.path.insert(0, os.path.dirname(__file__))
|
# NOTE(review): 'testing' and 'frontend' are injected by the enclosing
# test harness; this file is not importable on its own.
with_class = testing.expect.with_class
extract_text = testing.expect.extract_text
# Check the basic news page rendering for an anonymous user.
frontend.page(
    "news",
    expect={ "document_title": testing.expect.document_title(u"News"),
             "content_title": testing.expect.paleyellow_title(0, u"News"),
             "pageheader_links": testing.expect.pageheader_links("anonymous"),
             "script_user": testing.expect.script_no_user() })
# Load all news items to make sure they are syntactically correct.
#
# There may not be any, and we can't easily test that the right set of news
# items are listed, since this depends on whether we upgraded and from what.
# But this testing is still somewhat meaningful.
document = frontend.page("news", params={ "display": "all" })
items = document.findAll(attrs=with_class("item"))
for item in items:
    item_id = item["critic-item-id"]
    item_title = extract_text(item.find(attrs=with_class("title")))
    # Each individual item page must use the item's own title as both the
    # document title and the content title.
    frontend.page(
        "news",
        params={ "item": item_id },
        expect={ "document_title": testing.expect.document_title(item_title),
                 "content_title": testing.expect.paleyellow_title(0, item_title),
                 "pageheader_links": testing.expect.pageheader_links("anonymous"),
                 "script_user": testing.expect.script_no_user() })
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-26 14:33
from __future__ import unicode_literals
from django.db import migrations, models
import wagtail.wagtailcore.blocks
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
    """
    Schema migration for the 'common' app: drops the journal
    'class_choice' field, adds free-form area/association/publisher text
    fields and a set of boolean classification flags, and makes the
    'additional' StreamField optional (blank/null).
    NOTE(review): auto-generated by Django 1.10.2 - do not edit field
    definitions by hand; create a follow-up migration instead.
    """
    dependencies = [
        ('common', '0029_organization_logo'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='journal',
            name='class_choice',
        ),
        migrations.AddField(
            model_name='journal',
            name='area',
            field=models.CharField(blank=True, max_length=255),
        ),
        migrations.AddField(
            model_name='journal',
            name='association',
            field=models.CharField(blank=True, max_length=255),
        ),
        migrations.AddField(
            model_name='journal',
            name='is_featured_journal',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='journal',
            name='is_preregistered_journal',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='journal',
            name='is_registered_journal',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='journal',
            name='is_special_journal',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='journal',
            name='is_top_journal',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='journal',
            name='publisher',
            field=models.CharField(blank=True, max_length=255),
        ),
        migrations.AlterField(
            model_name='journal',
            name='additional',
            field=wagtail.wagtailcore.fields.StreamField((('journal', wagtail.wagtailcore.blocks.RawHTMLBlock()), ('note', wagtail.wagtailcore.blocks.RawHTMLBlock())), blank=True, null=True),
        ),
    ]
|
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2017 Jadson Bonfim Ribeiro <contato@jadsonbr.com.br>
#
import os
import subprocess
import re
import xml.etree.ElementTree as ET
# Output formats accepted by the jasperstarter '-f' option.
FORMATS = (
    'pdf',
    'rtf',
    'xls',
    'xlsx',
    'docx',
    'odt',
    'ods',
    'pptx',
    'csv',
    'html',
    'xhtml',
    'xml',
    'jrprint',
)
# Name of the JasperStarter command line binary.
EXECUTABLE = 'jasperstarter'


class JasperPy:
    """
    Python wrapper around the JasperStarter command line tool for
    compiling and rendering JasperReports (.jrxml) templates.

    NOTE(review): commands are executed with shell=True and are assembled
    by string concatenation, so all inputs (paths, parameters, db
    credentials) must come from trusted sources only.
    """

    def __init__(self, resource_dir=False):
        """
        :param resource_dir: path to the report resource directory (or jar
            file); defaults to the bundled jasperstarter/bin directory
        :raises NameError: if an explicitly given resource_dir does not exist
        """
        # Directory expected to contain the jasperstarter executable.
        self.path_executable = os.path.dirname(os.path.abspath(__file__)) \
            + '/jasperstarter/bin'
        self.windows = os.name == 'nt'
        self._command = ''
        if not resource_dir:
            resource_dir = os.path.dirname(os.path.abspath(__file__)) \
                + '/jasperstarter/bin'
        else:
            if not os.path.exists(resource_dir):
                raise NameError('Invalid resource directory!')
        # Path to report resource dir or jar file
        self.resource_directory = resource_dir

    def compile(self, input_file, output_file=False):
        """
        Compile a .jrxml template into a .jasper file.
        :param input_file: path to the .jrxml template
        :param output_file: optional output path passed via '-o'
        :return: jasperstarter exit code
        :raises NameError: if no input file is given or the run fails
        """
        if not input_file:
            raise NameError('No input file!')
        command = self.path_executable + '/' + EXECUTABLE
        command += ' compile '
        command += "\"%s\"" % input_file
        if output_file:
            command += ' -o ' + "\"%s\"" % output_file
        self._command = command
        return self.execute()

    def process(self, input_file, output_file=False, format_list=None,
                parameters=None, db_connection=None, locale='pt_BR', resource=""):
        """
        Render a compiled/template report into one or more output formats.
        :param input_file: path to the report file
        :param output_file: optional output path passed via '-o'
        :param format_list: list of output formats (default ['pdf'])
        :param parameters: report parameters as a name->value dict
        :param db_connection: datasource settings (driver plus optional keys)
        :param locale: locale passed to jasperstarter via '--locale'
        :param resource: resource dir/jar; '.' adds a bare '-r' flag
        :return: jasperstarter exit code
        :raises NameError: on missing input, unknown format, or a failed run
        """
        # Avoid mutable default arguments; the historical defaults
        # (['pdf'], {}, {}) are preserved.
        if format_list is None:
            format_list = ['pdf']
        if parameters is None:
            parameters = {}
        if db_connection is None:
            db_connection = {}
        if not input_file:
            raise NameError('No input file!')
        if isinstance(format_list, list):
            if any(fmt not in FORMATS for fmt in format_list):
                raise NameError('Invalid format!')
        else:
            raise NameError("'format_list' value is not list!")
        command = self.path_executable + '/' + EXECUTABLE
        command += " --locale %s" % locale
        command += ' process '
        command += "\"%s\"" % input_file
        if output_file:
            command += ' -o ' + "\"%s\"" % output_file
        command += ' -f ' + ' '.join(format_list)
        if len(parameters) > 0:
            command += ' -P '
            for key, value in parameters.items():
                param = key + '="' + value + '" '
                command += " " + param + " "
        if len(db_connection) > 0:
            command += ' -t ' + db_connection['driver']
            # Optional datasource settings: emitted only when provided.
            option_flags = (
                ('username', " -u "),
                ('password', ' -p '),
                ('host', ' -H '),
                ('database', ' -n '),
                ('port', ' --db-port '),
                ('jdbc_driver', ' --db-driver '),
                ('jdbc_url', ' --db-url '),
                ('jdbc_dir', ' --jdbc-dir '),
                ('db_sid', ' --db-sid '),
                ('xml_xpath', ' --xml-xpath '),
                ('data_file', ' --data-file '),
                ('json_query', ' --json-query '),
                ('jsonql_query', ' --jsonql-query '),
            )
            for key, flag in option_flags:
                if key in db_connection:
                    command += flag + db_connection[key]
        if resource != "":
            if (resource == "."):
                command += " -r "
            else:
                command += " -r " + resource
        self._command = command
        return self.execute()

    @staticmethod
    def list_parameters(input_xml):
        """
        Parse a .jrxml file and return its declared parameters.
        :param input_xml: path to the .jrxml file
        :return: dict mapping parameter name -> [java class, default value
            text ('' when no child element is present)]
        :raises NameError: if no input file is given
        """
        if not input_xml:
            raise NameError('No input file!')
        with open(input_xml, 'r') as f:
            f_content = f.read()
        # Strip the default namespace so findall('parameter') matches.
        xmlstring = re.sub(' xmlns="[^"]+"', '', f_content, count=1)
        param_dic = {}
        tree = ET.fromstring(xmlstring)
        for item in tree.findall('parameter'):
            if item.get('name'):
                param_dic.update({item.get('name'): [item.get('class')]})
                if list(item):
                    # first child is typically <defaultValueExpression>
                    param_dic[item.get('name')].append(list(item)[0].text)
                else:
                    param_dic[item.get('name')].append('')
        return param_dic

    @property
    def command(self):
        """The last shell command assembled by compile()/process()."""
        return self._command

    def execute(self, run_as_user=False):
        """
        Run the previously assembled command.
        :param run_as_user: optional user name to run the command as (POSIX only)
        :return: jasperstarter exit code (0 on success)
        :raises NameError: if the executable directory is missing or the
            command exits non-zero
        """
        if run_as_user and (not self.windows):
            # Bug fix: 'su -u user' is not a valid su invocation; the
            # correct form is 'su <user> -c "<command>"'.
            self._command = 'su ' + run_as_user + " -c \"" + \
                self.command + "\""
        if not os.path.isdir(self.path_executable):
            raise NameError('Invalid resource directory!')
        try:
            completed = subprocess.run(
                self.command, shell=True, check=True, encoding='utf-8', stderr=subprocess.PIPE)
            return completed.returncode
        except AttributeError:
            # Python < 3.5 has no subprocess.run; check_call returns 0 on
            # success (the old code accessed .returncode on that int and
            # crashed).
            return subprocess.check_call(self.command, shell=True)
        except subprocess.CalledProcessError as e:
            raise NameError('Your report has an error and couldn\'t be processed!\n' + e.stderr)
|
#!/usr/bin/python3
# Scrape IP addresses listed on abuseipdb.com's sitemap pages and dump
# them to a text file.
# NOTE(review): 'requests' is a third-party dependency.
import requests
import re
URL = "https://www.abuseipdb.com/sitemap?page="
# Each listing links to href="https://www.abuseipdb.com/check/<ip>"
ip_pattern = 'href="https://www.abuseipdb.com/check/(.+)"'
malicious = []
for ip in range(1,195):
    r = requests.get(url=URL+str(ip)).text
    if re.findall(pattern=ip_pattern, string=r):
        # Each appended element is the full list of IPs found on one page.
        malicious.append(re.findall(pattern=ip_pattern, string=r))
        # NOTE(review): this break stops after the first page that matches,
        # so pages after it are never collected - looks like a debugging
        # leftover; confirm intent before removing.
        break
with open('malicious-ips.txt', 'w') as file:
    for i in malicious:
        file.writelines('\n'.join(i))
# Sort the output on this site:
# https://www.ipvoid.com/sort-ip-addresses/
# Build an implicit-feedback collaborative-filtering training file: one
# output line per (user, item) pair carrying the summed listen-percentage
# over all of that pair's listening events.
input_file = '../../../data/merged_base.data'
cf_train_f = '../../../data/cf_train.data'
# User-Item dict: '<user>_<item>' -> list of [listen_len, total_time] pairs
u_i_dict = {}
with open(input_file, 'r') as f_in:
    for line in f_in:
        # Fields are separated by the \001 control character.
        ss = line.strip().split('\001')
        user_id, item_id, listen_len, listen_moment, gender, age, salary, user_loc, name, desc, total_time, item_loc, tags = ss
        # NOTE(review): ids containing '_' would break the split further
        # down - assumed not to occur in this dataset.
        u_i_id = '_'.join([user_id, item_id])
        u_i_dict.setdefault(u_i_id, []).append([int(listen_len), int(total_time)])
# Open the output once instead of truncating it up front and re-opening
# it in append mode for every key (the original did one open() per pair).
with open(cf_train_f, 'w') as f_cf:
    for k, vs in u_i_dict.items():
        # Score = sum of listen_len/total_time over all listening events.
        score = sum(float(listen_len) / float(total_time) for listen_len, total_time in vs)
        user_id, item_id = k.strip().split('_')
        f_cf.write('{},{},{}\n'.format(user_id, item_id, score))
"""
Test szybkości dla rozdzielczości 600x600:
Odczyt, tworzenie i zapis zdjęć (z duplikatami czyli bez tego 'if is_new_data == True'):
Podczas 20 sekund udało się zapisać 455 zdjęć RGB albo 1200 zdjęć w grayscale 1 channel albo 546 zdjęć w grayscale
3 channels.
Tylko odczyt i tworzenie zdjęć, bez ich zapisywania:
Podczas 20 sekund udało się utworzyć 57063568 zdjęć RGB (wszystkie nowe, bez wielokrotnego zapisywania tego samego
zdjęcia) albo miliony zdjęć grayscale albo miliony zdjęć w grayscale 3 channels
Wniosek z tego taki, że samo tworzenie obrazów idzie teraz bardzo szybko, to zapis jest czasochłonny i też to
Video Camera głównie opóżnia, mam na myśli, że obiekt klasy ImagesProcessing szybciej pobiera obrazy z obiektu klasy
VideoCameraServer i je zapisuje niż VideoCameraServer odbiera nowe obrazy z Video Camery, dekoduje je i wystawia jako
nowe zdjęcia.
Jeśli będzie się chciało zapisywać tylko nowe obrazy to wtedy szybkośc drastycznie zmaleje, nawet kilkudziesięciokrotnie,
wtedy pewnie potrzebne będzie zmiejszenie rozdzielczości Video Camery na mniejszą niż 600x600.
"""
import matplotlib.pyplot as plt
import numpy as np
import scipy.misc
import argparse
import time
import cv2
from math import sqrt
# from PIL import Image
how_long = 20
default_path = 'obrazy/'
do_save_rgb = 1
do_save_gray_1channel = 0
do_save_gray_3channel = 0
def main():  # main for testing and showing how to use
    """
    Function that connects to the MORSE simulation and allows us to access data from camera and process it using
    prepared functions or our own algorithms
    """
    # video_camera_server is a project-local module; imported lazily so the
    # rest of this file can be imported without it.
    from video_camera_server import VideoCameraServer
    print("Accessing data... ")
    parser = parse_args()
    image_processing = ImagesProcessing()
    video_camera_server = VideoCameraServer('localhost', 60011)
    video_camera_server.run()
    # Give the server a moment to connect before querying properties.
    time.sleep(0.5)
    # Checking camera properties to read size of image sent by camera
    camera_properties = video_camera_server.get_all()
    img_width = camera_properties['width']
    img_height = camera_properties['height']
    # Loop in which data from camera is downloaded, processed and can be send further
    time_end = time.time() + parser.time
    # 'licznik' is Polish for 'counter': number of frames saved so far,
    # used to build sequential file names.
    licznik = 0
    while time.time() < time_end:
        # Receiving one sample of data from camera, data field of image is a base64 encoded RGBA image
        image = video_camera_server.get_image()
        if do_save_rgb:
            # RGB Image - only saved when the server flags a fresh frame.
            if video_camera_server.is_new_data == True:
                licznik += 1
                video_camera_server.is_new_data = False
                rgb_image = image_processing.create_rgb_image(image, img_height, img_width)
                # image_processing.plot_rgb_image(rgb_image)
                # Save image to png file
                image_processing.save_rgb_image_absolute_path(rgb_image, default_path+'rgb'+str(licznik).rjust(4, '0')+'.png')
                # image_processing.save_rgb_image_timestamp(image, parser.save)
                # plot_rgb_image(rgb_image)
        if do_save_gray_1channel:
            # Grayscale Image (single channel)
            if video_camera_server.is_new_data == True:
                video_camera_server.is_new_data = False
                licznik += 1
                gray_image = image_processing.create_gray_image(image, img_height, img_width)
                # Save image to png file
                image_processing.save_grayscale_image_absolute_path(gray_image, default_path+'gray'+str(licznik).rjust(4, '0')+'.png')
                # plot_gray_image(gray_image)
        if do_save_gray_3channel:
            # Grayscale Image (replicated to three channels)
            if video_camera_server.is_new_data == True:
                licznik += 1
                video_camera_server.is_new_data = False
                gray_image = image_processing.create_gray_image_3channels(image, img_height, img_width)
                # Save image to png file
                image_processing.save_grayscale_image_absolute_path(gray_image, default_path+'gray3ch'+str(licznik).rjust(4, '0')+'.png')
                # plot_gray_image(gray_image)
    print(licznik)
def parse_args():
    """Build the command-line interface and parse sys.argv.

    Exposes two options: ``-time`` (capture duration in seconds, default
    ``how_long``) and ``-save`` (output folder for images, default
    ``default_path``).

    Example:
        python3 images_processing_class.py -time 20 -save 'name_of_folder/'

    :return: argparse namespace with 'time' and 'save' attributes
    """
    cli = argparse.ArgumentParser(
        description='Input command line arguments for images_processing_class.py script')
    cli.add_argument(
        '-time',
        metavar="Duration of saving data from camera",
        type=float,
        default=how_long,
        help='For how much time script will download data from camera and process it (in seconds). '
             'Default: 10 seconds')
    cli.add_argument(
        '-save',
        metavar="Path to output folder",
        type=str,
        default=default_path,
        help='Path to output folder in which images received from camera are saved. This path includes '
             'only path to folder because names of images are generated automatically based on '
             'timestamp and type of image (RGB or Grayscale). Default: "obrazy/" ')
    return cli.parse_args()
class ImagesProcessing:
"""
Class providing many functions to work on images, for example functions to create RGB or grayscale images from
images as binary objects and functions to save and plot these images.
This class can also store one image in case it is needed.
"""
    def __init__(self):
        # Optional slot for stashing a single image on the instance; not
        # touched by the create_* helpers visible in this file.
        self.internal_image = None
    def create_rgba_image(self, binary_image, img_height, img_width):
        """
        Function creates an RGBA image from an image in binary format; pixel values range from 0 to 255.
        Tips:
        Using matplotlib.pyplot: If you want to show this image on your own using matplotlib.pyplot remember
        that you need to scale pixels' values to <0, 1>.
        Using OpenCV: Pay attention that if you want to show this image using OpenCV, you will get image with reversed
        colors because default format for OpenCV is BGR.
        Using scipy.misc.imsave: To use scipy to save image pixels' value can be either <0, 1> or <0, 255>.
        :param binary_image: image in binary format (raw RGBA pixel buffer)
        :param img_width: width of image, can be accessed from videocamera parameter 'cam_width'
        :param img_height: height of image, can be accessed from videocamera parameter 'cam_height'
        :return: RGBA image as a numpy uint8 array of shape (img_width, img_height, 4)
        NOTE(review): the buffer is reshaped as (img_width, img_height, 4),
        i.e. width first - for non-square images confirm this is the
        intended axis order before relying on it.
        """
        # Creating RGBA image - first method (the slowest) - 66 images in 20 seconds with saving
        """
        Rather slow: creates 66 images in 20 seconds
        RGBA_image = np.empty((img_width * img_height, 3))
        for index in range(0, int(len(binary_image) / 4)):
            RGB_image[index][0] = binary_image[4 * index + 0]
            RGB_image[index][1] = binary_image[4 * index + 1]
            RGB_image[index][2] = binary_image[4 * index + 2]
        RGBA_image = np.reshape(RGBA_image, (img_height, img_width, 3))
        # Changing format of image so it can be used by Open-CV functions and many more
        RGBA_image = RGBA_image.astype(np.uint8)
        """
        # Creating RGBA image - second method (the fastest) - 425-455 images in 20 seconds with saving
        RGBA_image = np.ndarray(shape=(img_width, img_height, 4), buffer=binary_image, dtype='uint8')
        # Creating RGBA image - third method (very fast but a bit slower than the second method) - 370-417 images in 20 seconds with saving
        # RGBA_image = Image.frombuffer('RGBA', (img_width, img_height), image, 'raw', 'RGBA', 0, 1)
        return RGBA_image
def create_rgb_image(self, binary_image, img_height, img_width):
"""
Function creates RGB image based on RGBA image created from image in binary format.
:param binary_image: image in binary format
:param img_width: width of image, can be accessed from videocamera parameter 'cam_width'
:param img_height: height of image, can be accessed from videocamera parameter 'cam_height'
:return: RGB image as numpy array
"""
# Creating RGB image - 600 images in 20 seconds
rgba_image = self.create_rgba_image(binary_image, img_height, img_width)
rgb_image = cv2.cvtColor(rgba_image, cv2.COLOR_RGBA2RGB)
return rgb_image
def create_gray_image(self, binary_image, img_height, img_width):
    """Convert a binary-format camera frame into a single-channel grayscale array.

    The RGBA frame is built first (fast path), then OpenCV collapses it to
    one grayscale channel; pixel values range from 0 to 255.

    Tips:
    Using matplotlib.pyplot: because this is a one-channel image you must
    choose the colormap yourself — pass "cmap='gray'" to get a grayscale plot.

    :param binary_image: image in binary format
    :param img_height: height of image, can be accessed from videocamera parameter 'cam_height'
    :param img_width: width of image, can be accessed from videocamera parameter 'cam_width'
    :return: grayscale image as a numpy array with pixel values from 0 to 255
    """
    # Fast approach (~1200 images per 20 s including saving): RGBA first,
    # then a single C-level conversion, instead of the per-pixel
    # 0.2126*R + 0.7152*G + 0.0722*B list comprehension (~127 images/20 s).
    as_rgba = self.create_rgba_image(binary_image, img_height, img_width)
    return cv2.cvtColor(as_rgba, cv2.COLOR_RGBA2GRAY)
def create_gray_image_3channels(self, binary_image, img_height, img_width):
    """Convert a binary-format camera frame into a 3-channel grayscale array.

    A one-channel grayscale image is produced first (fast RGBA + OpenCV
    conversion), then duplicated into all three channels. Pixel values
    range from 0 to 255.

    :param binary_image: image in binary format
    :param img_height: height of image, can be accessed from videocamera parameter 'cam_height'
    :param img_width: width of image, can be accessed from videocamera parameter 'cam_width'
    :return: grayscale image as a numpy array of shape
        (img_height, img_width, 3) with pixel values from 0 to 255
    """
    # Fast method (~546 images per 20 s with saving): one grayscale channel
    # via OpenCV, copied into each plane of a float array.
    as_rgba = self.create_rgba_image(binary_image, img_height, img_width)
    single_channel = cv2.cvtColor(as_rgba, cv2.COLOR_RGBA2GRAY).reshape(img_height, img_width)
    stacked = np.zeros((img_height, img_width, 3))
    for channel in range(3):
        stacked[:, :, channel] = single_channel
    return stacked
def plot_rgb_image(self, image):
    """Display an RGB image with matplotlib.

    :param image: RGB image as a numpy array with pixel values 0-255
    """
    # Matplotlib expects float RGB data scaled to the 0-1 range.
    plt.imshow(image / 255)
    plt.show()
def plot_gray_image(self, image):
    """Display a grayscale image with matplotlib.

    :param image: grayscale image as a numpy array with pixel values 0-255
    """
    # Scale to 0-1 and force the gray colormap, since a single-channel
    # array would otherwise be rendered with the default colormap.
    plt.imshow(image / 255, cmap='gray')
    plt.show()
def save_rgb_image_timestamp(self, image, path):
    """Save an RGB image as a .png file named after the current timestamp.

    :param image: Input RGB image as a numpy array.
    :param path: Folder the image is written into, e.g. 'my_images/'
    """
    # NOTE(review): scipy.misc.imsave was deprecated in SciPy 1.0 and
    # removed in 1.2 — confirm the pinned scipy version or migrate to
    # imageio.imwrite.
    target = '{}rgb_image{}.png'.format(path, time.time())
    scipy.misc.imsave(target, image)
def save_rgb_image_absolute_path(self, image, path):
    """
    Function that saves RGB image to .png file with absolute path.
    :param image: Input RGB image as a numpy array.
    :param path: Absolute path where image will be saved
    """
    # NOTE(review): scipy.misc.imsave was removed in SciPy >= 1.2 — verify
    # the pinned scipy version or switch to imageio.imwrite.
    scipy.misc.imsave(path, image)
def save_grayscale_image_timestamp(self, image, path):
    """Save a grayscale image as a .png file named after the current timestamp.

    :param image: Input Grayscale image as a numpy array.
    :param path: Folder the image is written into, e.g. 'my_images/'
    """
    # NOTE(review): scipy.misc.imsave was removed in SciPy >= 1.2 — verify
    # the pinned scipy version or switch to imageio.imwrite.
    target = '{}grayscale_image{}.png'.format(path, time.time())
    scipy.misc.imsave(target, image)
def save_grayscale_image_absolute_path(self, image, path):
    """
    Function that saves Grayscale image to .png file with absolute path.
    :param image: Input Grayscale image as a numpy array.
    :param path: Absolute path where image will be saved
    """
    # NOTE(review): scipy.misc.imsave was removed in SciPy >= 1.2 — verify
    # the pinned scipy version or switch to imageio.imwrite.
    scipy.misc.imsave(path, image)
def create_color_vector(self, rgb_image, how_many_3d_points):
    """Extract per-point RGB colors for coloring a live 3D scatter plot.

    The RGB frame is shrunk to roughly one pixel per 3D point, flattened to
    an (N, 3) array so that consecutive rows correspond to consecutive
    points in the point cloud, rescaled to 0-1, and finally resized so it
    has exactly one color row per 3D point.

    :param rgb_image: 3-dimensional RGB image the colors are extracted from
    :param how_many_3d_points: amount of points received from Depth Camera
        in the current iteration
    :return: numpy array of shape (how_many_3d_points, 3) holding one RGB
        color per 3D point of the current plotting iteration
    """
    # Approximate square image size matching the point count.
    side = int(sqrt(how_many_3d_points))
    shrunk = cv2.resize(rgb_image, (side, side))
    # Flatten to one RGB row per pixel, then rescale 0-255 -> 0-1.
    colors = np.reshape(shrunk, (side * side, 3)) / 255
    # Pad (np.resize repeats the data) or trim to exactly one row per point.
    return np.resize(colors, (how_many_3d_points, 3))
# Script entry point: run the demo only when executed directly.
if __name__ == "__main__":
    main()
|
from __future__ import absolute_import
# flake8: noqa
# import apis into api package
from products_and_services_client.api.accounts_api import AccountsApi
from products_and_services_client.api.credit_cards_api import CreditCardsApi
from products_and_services_client.api.financings_api import FinancingsApi
from products_and_services_client.api.invoice_financings_api import InvoiceFinancingsApi
from products_and_services_client.api.loans_api import LoansApi
from products_and_services_client.api.unarranged_account_overdraft_api import UnarrangedAccountOverdraftApi
|
import sys
from mycroft import MycroftSkill, intent_file_handler
from Adafruit_IO import MQTTClient
# NOTE(review): a live Adafruit IO API key is committed here in plain text.
# It should be loaded from configuration/environment and the leaked key
# rotated.
ADAFRUIT_IO_KEY = 'aio_qKOt11nG7Bu0X9D1jEZLEg6VkEGE'
ADAFRUIT_IO_USERNAME = 'Kenzo16'
# Connect to Adafruit IO at import time and keep the MQTT client running
# in a background thread for the lifetime of the skill process.
client = MQTTClient(ADAFRUIT_IO_USERNAME, ADAFRUIT_IO_KEY)
client.connect()
client.loop_background()
class Seconlambon(MycroftSkill):
    """Mycroft skill that switches the second lamp on via Adafruit IO."""

    def __init__(self):
        # Plain delegation to the base skill initialiser.
        super(Seconlambon, self).__init__()

    @intent_file_handler('seconlambon.intent')
    def handle_seconlambon(self, message):
        """Speak the confirmation dialog and publish 1 to the 'Lamb2' feed."""
        self.speak_dialog('seconlambon')
        client.publish('Lamb2', 1)
def create_skill():
    # Factory used by mycroft-core to instantiate the skill.
    return Seconlambon()
|
import os
import sys
from typing import Any, Callable, Dict, TextIO
from urllib.parse import urlparse
import click
import filelock
import structlog
from eth_utils import to_canonical_address, to_normalized_address
from web3 import HTTPProvider, Web3
from definitions import ROOT_DIR
import json
from eth_utils import encode_hex
from raiden.storage import serialize, sqlite
from raiden.accounts import AccountManager
from raiden.constants import (
MONITORING_BROADCASTING_ROOM,
PATH_FINDING_BROADCASTING_ROOM,
RAIDEN_DB_VERSION,
Environment,
RoutingMode,
)
from raiden.exceptions import RaidenError
from raiden.message_handler import MessageHandler
from raiden.network.blockchain_service import BlockChainService
from raiden.network.rpc.client import JSONRPCClient
from raiden.network.transport import MatrixTransport
from raiden.network.transport.matrix import MatrixLightClientTransport, NodeTransport
from raiden.raiden_event_handler import RaidenEventHandler
from raiden.settings import (
DEFAULT_MATRIX_KNOWN_SERVERS,
DEFAULT_NAT_KEEPALIVE_RETRIES,
DEFAULT_NUMBER_OF_BLOCK_CONFIRMATIONS,
)
from raiden.ui.checks import (
check_ethereum_client_is_supported,
check_ethereum_has_accounts,
check_ethereum_network_id,
check_sql_version,
check_synced,
)
from raiden.ui.prompt import (
prompt_account,
unlock_account_with_passwordfile,
unlock_account_with_passwordprompt,
)
from raiden.ui.startup import (
setup_contracts_or_exit,
setup_environment,
setup_proxies_or_exit,
setup_udp_or_exit,
)
from raiden.utils import BlockNumber, pex, split_endpoint
from raiden.utils.cli import get_matrix_servers
from raiden.utils.typing import Address, Optional, PrivateKey, Tuple
from raiden_contracts.constants import ID_TO_NETWORKNAME
from raiden_contracts.contract_manager import ContractManager
log = structlog.get_logger(__name__)
def _setup_matrix(config):
    """Build the matrix-based node transport from ``config``.

    Fetches the list of known matrix servers when none is configured, joins
    the service broadcast rooms, creates one light-client transport per
    light client persisted in the database plus the hub transport, and
    wraps them in a ``NodeTransport``.

    :param config: raiden configuration dict (mutated in place)
    :return: NodeTransport combining hub and light-client transports
    """
    if config["transport"]["matrix"].get("available_servers") is None:
        # fetch list of known servers from raiden-network/raiden-transport repo
        available_servers_url = DEFAULT_MATRIX_KNOWN_SERVERS[config["environment_type"]]
        available_servers = get_matrix_servers(available_servers_url)
        log.debug("Fetching available matrix servers", available_servers=available_servers)
        config["transport"]["matrix"]["available_servers"] = available_servers
    # TODO: This needs to be adjusted once #3735 gets implemented
    # Add PFS broadcast room if enabled
    if config["services"]["pathfinding_service_address"] is not None:
        if PATH_FINDING_BROADCASTING_ROOM not in config["transport"]["matrix"]["global_rooms"]:
            config["transport"]["matrix"]["global_rooms"].append(PATH_FINDING_BROADCASTING_ROOM)
    # Add monitoring service broadcast room if enabled.  Guard against
    # appending a duplicate entry, consistent with the PFS room handling.
    if config["services"]["monitoring_enabled"] is True:
        if MONITORING_BROADCASTING_ROOM not in config["transport"]["matrix"]["global_rooms"]:
            config["transport"]["matrix"]["global_rooms"].append(MONITORING_BROADCASTING_ROOM)
    try:
        database_path = config["database_path"]
        database_dir = os.path.dirname(config["database_path"])
        os.makedirs(database_dir, exist_ok=True)
        storage = sqlite.SerializedSQLiteStorage(
            database_path=database_path, serializer=serialize.JSONSerializer()
        )
        light_clients = storage.get_all_light_clients()
        # One transport per stored light client, logging in with the
        # credentials persisted in the database.
        light_client_transports = []
        for light_client in light_clients:
            light_client_transport = get_matrix_light_client_instance(
                config["transport"]["matrix"],
                light_client['password'],
                light_client['display_name'],
                light_client['seed_retry'],
                light_client['address'])
            light_client_transports.append(light_client_transport)
        hub_transport = MatrixTransport(config["transport"]["matrix"])
        node_transport = NodeTransport(hub_transport, light_client_transports)
    except RaidenError as ex:
        click.secho(f"FATAL: {ex}", fg="red")
        sys.exit(1)
    return node_transport
def get_matrix_light_client_instance(config, password, display_name, seed_retry, address):
    # Thin factory around MatrixLightClientTransport, kept as a seam for
    # testing/mocking transport creation.
    light_client_transport = MatrixLightClientTransport(config, password, display_name, seed_retry, address)
    return light_client_transport
def _setup_web3(eth_rpc_endpoint):
    """Create a Web3 instance for ``eth_rpc_endpoint`` and verify the node.

    Checks that the node is reachable, exposes the web3 RPC interface and
    is a supported Byzantium-enabled client; exits the process otherwise.

    :param eth_rpc_endpoint: URL of the ethereum node's RPC endpoint
    :return: connected Web3 instance
    """
    web3 = Web3(HTTPProvider(eth_rpc_endpoint))
    # NOTE(review): ConnectTimeout, EthNodeCommunicationError,
    # EthNodeInterfaceError and is_supported_client are not imported in this
    # module, so the exception paths below raise NameError — confirm the
    # intended imports (requests.exceptions.ConnectTimeout, the raiden
    # exception classes and the is_supported_client helper).
    try:
        node_version = web3.version.node  # pylint: disable=no-member
    except ConnectTimeout:
        raise EthNodeCommunicationError("Couldn't connect to the ethereum node")
    except ValueError:
        raise EthNodeInterfaceError(
            'The underlying ethereum node does not have the web3 rpc interface '
            'enabled. Please run it with --rpcapi eth,net,web3,txpool for geth '
            'and --jsonrpc-apis=eth,net,web3,parity for parity.',
        )
    supported, _ = is_supported_client(node_version)
    if not supported:
        click.secho(
            # Fixed typo in the user-facing message: "RSK -= 0.6.0" -> ">= 0.6.0".
            'You need a Byzantium enabled ethereum node. Parity >= 1.7.6, Geth >= 1.7.2 or RSK >= 0.6.0',
            fg='red',
        )
        sys.exit(1)
    return web3
def get_account_and_private_key(
    account_manager: AccountManager, address: Optional[Address], password_file: Optional[TextIO]
) -> Tuple[Address, PrivateKey, Any]:
    """Resolve which account to use and unlock its private key.

    Prompts for an account when no address is given, then unlocks it either
    with the supplied password file or interactively.

    :return: (canonical address, private key, public key)
    """
    if not address:
        # No address on the command line: let the user pick interactively.
        address_hex = prompt_account(account_manager)
    else:
        address_hex = to_normalized_address(address)
    if password_file:
        # NOTE(review): this branch never assigns pubkey_bin, so the return
        # statement below raises NameError whenever a password file is used —
        # confirm whether the passwordfile unlock helper should also return
        # the public key.
        privatekey_bin = unlock_account_with_passwordfile(
            account_manager=account_manager, address_hex=address_hex, password_file=password_file
        )
    else:
        privatekey_bin, pubkey_bin = unlock_account_with_passwordprompt(
            account_manager=account_manager, address_hex=address_hex
        )
    return to_canonical_address(address_hex), privatekey_bin, pubkey_bin
def rpc_normalized_endpoint(eth_rpc_endpoint: str) -> str:
parsed_eth_rpc_endpoint = urlparse(eth_rpc_endpoint)
if parsed_eth_rpc_endpoint.scheme:
return eth_rpc_endpoint
return f"http://{eth_rpc_endpoint}"
def run_app(
    address: Address,
    keystore_path: str,
    gas_price: Callable,
    eth_rpc_endpoint: str,
    tokennetwork_registry_contract_address: Address,
    one_to_n_contract_address: Address,
    secret_registry_contract_address: Address,
    service_registry_contract_address: Address,
    endpoint_registry_contract_address: Address,
    user_deposit_contract_address: Address,
    listen_address: str,
    mapped_socket,
    max_unresponsive_time: int,
    api_address: str,
    rpc: bool,
    sync_check: bool,
    console: bool,
    password_file: TextIO,
    web_ui: bool,
    datadir: str,
    transport: str,
    matrix_server: str,
    network_id: int,
    environment_type: Environment,
    unrecoverable_error_should_crash: bool,
    pathfinding_service_address: str,
    pathfinding_max_paths: int,
    enable_monitoring: bool,
    resolver_endpoint: str,
    routing_mode: RoutingMode,
    config: Dict[str, Any],
    **kwargs: Any,  # FIXME: not used here, but still receives stuff in smoketest
):
    """Bootstrap and start a Raiden App from CLI options.

    Validates the ethereum node and account, fills ``config`` in place,
    sets up contract proxies, the database path and the transport, then
    creates and starts the App. Exits the process on fatal errors.

    :return: the started App instance
    """
    # pylint: disable=too-many-locals,too-many-branches,too-many-statements,unused-argument
    from raiden.app import App
    if transport == "udp" and not mapped_socket:
        raise RuntimeError("Missing socket")
    if datadir is None:
        datadir = os.path.join(os.path.expanduser("~"), ".raiden")
    # --- Preconditions: node reachable, supported, right network, accounts ---
    account_manager = AccountManager(keystore_path)
    web3 = Web3(HTTPProvider(rpc_normalized_endpoint(eth_rpc_endpoint)))
    check_sql_version()
    check_ethereum_has_accounts(account_manager)
    check_ethereum_client_is_supported(web3)
    check_ethereum_network_id(network_id, web3)
    # Unlock the account; yields the canonical address and key material.
    (address, privatekey_bin, pubkey_bin) = get_account_and_private_key(
        account_manager, address, password_file
    )
    (listen_host, listen_port) = split_endpoint(listen_address)
    (api_host, api_port) = split_endpoint(api_address)
    # NOTE(review): printing the private key to stdout is a security risk —
    # confirm this is debug-only.
    print("Private key: " + encode_hex(privatekey_bin))
    print("Public key: " + encode_hex(pubkey_bin))
    # --- Populate the runtime config dict from the CLI options ---
    config["pubkey"] = pubkey_bin
    config["privatekey"] = privatekey_bin
    config["transport"]["udp"]["host"] = listen_host
    config["transport"]["udp"]["port"] = listen_port
    config["console"] = console
    config["rpc"] = rpc
    config["web_ui"] = rpc and web_ui
    config["api_host"] = api_host
    config["api_port"] = api_port
    config["resolver_endpoint"] = resolver_endpoint
    if mapped_socket:
        config["socket"] = mapped_socket.socket
        config["transport"]["udp"]["external_ip"] = mapped_socket.external_ip
        config["transport"]["udp"]["external_port"] = mapped_socket.external_port
    config["transport_type"] = transport
    config["transport"]["matrix"]["server"] = matrix_server
    config["transport"]["udp"]["nat_keepalive_retries"] = DEFAULT_NAT_KEEPALIVE_RETRIES
    timeout = max_unresponsive_time / DEFAULT_NAT_KEEPALIVE_RETRIES
    config["transport"]["udp"]["nat_keepalive_timeout"] = timeout
    config["unrecoverable_error_should_crash"] = unrecoverable_error_should_crash
    config["services"]["pathfinding_max_paths"] = pathfinding_max_paths
    config["services"]["monitoring_enabled"] = enable_monitoring
    config["chain_id"] = network_id
    # --- Contracts, RPC client and blockchain service ---
    setup_environment(config, environment_type)
    contracts = setup_contracts_or_exit(config, network_id)
    rpc_client = JSONRPCClient(
        web3,
        privatekey_bin,
        gas_price_strategy=gas_price,
        block_num_confirmations=DEFAULT_NUMBER_OF_BLOCK_CONFIRMATIONS,
        uses_infura="infura.io" in eth_rpc_endpoint,
    )
    blockchain_service = BlockChainService(
        jsonrpc_client=rpc_client, contract_manager=ContractManager(config["contracts_path"])
    )
    if sync_check:
        check_synced(blockchain_service)
    proxies = setup_proxies_or_exit(
        config=config,
        tokennetwork_registry_contract_address=tokennetwork_registry_contract_address,
        secret_registry_contract_address=secret_registry_contract_address,
        endpoint_registry_contract_address=endpoint_registry_contract_address,
        user_deposit_contract_address=user_deposit_contract_address,
        service_registry_contract_address=service_registry_contract_address,
        blockchain_service=blockchain_service,
        contracts=contracts,
        routing_mode=routing_mode,
        pathfinding_service_address=pathfinding_service_address,
    )
    # Database path is derived from node address, network and registry so
    # different setups never share state.
    database_path = os.path.join(
        datadir,
        f"node_{pex(address)}",
        f"netid_{network_id}",
        f"network_{pex(proxies.token_network_registry.address)}",
        f"v{RAIDEN_DB_VERSION}_log.db",
    )
    config["database_path"] = database_path
    print(
        "\nYou are connected to the '{}' network and the DB path is: {}".format(
            ID_TO_NETWORKNAME.get(network_id, network_id), database_path
        )
    )
    # FIXME mmartinez this must be checksummed or compared on a standard way
    # running_network = {"network_id": network_id,
    # "token_network_registry": encode_hex(tokennetwork_registry_contract_address),
    # "secret_registry": encode_hex(secret_registry_contract_address),
    # "endpoint_registry": encode_hex(endpoint_registry_contract_address)}
    # check_network_params(running_network)
    # --- Transport selection ---
    discovery = None
    if transport == "udp":
        transport, discovery = setup_udp_or_exit(
            config, blockchain_service, address, contracts, endpoint_registry_contract_address
        )
    elif transport == "matrix":
        transport = _setup_matrix(config)
    else:
        raise RuntimeError(f'Unknown transport type "{transport}" given')
    raiden_event_handler = RaidenEventHandler()
    message_handler = MessageHandler()
    # --- App creation and startup; any fatal error terminates the process ---
    try:
        start_block = 0
        if "TokenNetworkRegistry" in contracts:
            start_block = contracts["TokenNetworkRegistry"]["block_number"]
        raiden_app = App(
            config=config,
            chain=blockchain_service,
            query_start_block=BlockNumber(start_block),
            default_one_to_n_address=one_to_n_contract_address,
            default_registry=proxies.token_network_registry,
            default_secret_registry=proxies.secret_registry,
            default_service_registry=proxies.service_registry,
            transport=transport,
            raiden_event_handler=raiden_event_handler,
            message_handler=message_handler,
            discovery=discovery,
            user_deposit=proxies.user_deposit,
        )
    except RaidenError as e:
        click.secho(f"FATAL: {e}", fg="red")
        sys.exit(1)
    try:
        raiden_app.start()
    except RuntimeError as e:
        click.secho(f"FATAL: {e}", fg="red")
        sys.exit(1)
    except filelock.Timeout:
        # The database directory is protected by a file lock; a second
        # instance for the same account/network must not start.
        name_or_id = ID_TO_NETWORKNAME.get(network_id, network_id)
        click.secho(
            f"FATAL: Another Raiden instance already running for account "
            f"{to_normalized_address(address)} on network id {name_or_id}",
            fg="red",
        )
        sys.exit(1)
    return raiden_app
def check_network_params(running_network):
    """Validate the running network's contract addresses against config.json.

    Known networks (listed in the config) must use the configured smart
    contract addresses; otherwise the process exits. An unlisted network id
    is treated as a custom network.
    """
    config_path = os.path.join(ROOT_DIR, 'config.json')
    with open(config_path) as json_data_file:
        config_data = json.load(json_data_file)
    network_data = _get_network_info(running_network["network_id"], config_data)
    if not network_data:
        # Unknown network id: assume a user-defined deployment.
        print("You are running a custom network")
        return
    # Running Mainnet or Testnet: the contract addresses must match.
    if not validate_network_contracts(network_data, running_network):
        click.secho(
            "One or more of the specified smart contract addresses does not match with the configured ones",
            fg="red",
        )
        sys.exit(1)
def _get_network_info(network_id, config_data):
for network in config_data['networks'].values():
if network["network_id"] == network_id:
return network
return None
def validate_network_contracts(config_network, running_network):
    """Return True iff the three registry addresses match the configured ones."""
    checked_keys = ('token_network_registry', 'secret_registry', 'endpoint_registry')
    return all(running_network[key] == config_network[key] for key in checked_keys)
|
"""
Functions to setup a real and sample database
"""
# General imports
from peewee import *
from datetime import datetime as DT
import logging
from passlib.handlers.pbkdf2 import pbkdf2_sha256
# Internal imports
from models import db
from models.models import *
# Function to generate the tables in database
def create_schema():
    """ Generates the tables in the database """
    logging.info("Creating DB schema")
    # The db context manager opens a connection (and transaction) for the
    # duration of the table creation.
    with db:
        db.create_tables([User, KnownFace, Attendance])
    logging.info("DB schema created.")
# Function to populate database with sample data
def populate_sample_data():
    """Insert two sample users (a student and an admin) into the database."""
    with db:
        for login, pw, role, email, first, inst in (
                ("test", "test", "ST", "s1@junk.ss", "TEST", "CSB1000"),
                ("admin", "admin", "SU", "admin@admin.ss", "ADMIN", "CSB1001")):
            # Passwords are stored as PBKDF2-SHA256 hashes, never plain text.
            record = User(login_id=login, password=pbkdf2_sha256.hash(pw),
                          role=role, email=email, first_name=first,
                          last_name="USER", inst_id=inst)
            record.save()
# Function to setup demo database
def setup_demo_db():
    """ Sets up a demo database """
    # Create the tables first, then seed them with sample users.
    create_schema()
    populate_sample_data()
"""
Comment Model
===========
Model for commenting on Practices and Lessons;
Always in a group under a user
"""
import logging
import os
import re
import config
import util
import mandrill
import searchable_properties as sndb
from .lesson import Lesson
from .model import Model
from .practice import Practice
from .theme import Theme
from .topic import Topic
from .email import Email
from .user import User
class Comment(Model):
    """Always in a group under a User."""
    # Comment text; empty by default.
    body = sndb.TextProperty(default='')
    # Exactly one of practice_id / lesson_id is expected to be set; see create().
    practice_id = sndb.StringProperty(default=None)
    lesson_id = sndb.StringProperty(default=None)
    # Overrides Model's default for this property, which is False. We always
    # want to see comments.
    listed = sndb.BooleanProperty(default=True)

    @classmethod
    def create(klass, **kwargs):
        """Create a comment on a practice or lesson.

        Increments the parent content's comment counter and queues email
        notifications: to the content creator, to any @replied user, and to
        the configured team recipients.

        :raises Exception: when neither practice_id nor lesson_id is given.
        """
        if ('practice_id' not in kwargs) and ('lesson_id' not in kwargs):
            raise Exception('Must specify either a practice or a lesson when '
                            'creating a comment. Received kwargs: {}'
                            .format(kwargs))
        comment = super(klass, klass).create(**kwargs)
        # For email notifications
        content_url = '/'
        content = None
        # Increment num_comments on Practice or Lesson if success
        if comment.practice_id:
            practice = Practice.get_by_id(comment.practice_id)
            if practice is not None:
                practice.num_comments += 1
                practice.put()
                # For email
                content = practice
                content_url = '/practices/{}'.format(practice.short_uid)
                # Send email to creator
                creator = practice.get_parent_user()
                commenter = comment.get_parent_user()
                # logic to not email yourself...
                if creator.email != commenter.email:
                    short_name = creator.first_name or ''
                    full_name = creator.full_name
                    commenter_image_url = commenter.profile_image
                    # Uses Email model to queue email and prevent spam
                    email = Email.create(
                        to_address=creator.email,
                        subject="Someone commented on your Mindset Kit upload",
                        template="comment_creator_notification.html",
                        template_data={'short_name': short_name,
                                       'full_name': full_name,
                                       'commenter_name': commenter.full_name,
                                       'commenter_image_url': commenter_image_url,
                                       'content_name': content.name,
                                       'comment_body': comment.body,
                                       'content_url': content_url,
                                       'domain': os.environ['HOSTING_DOMAIN']},
                    )
                    email.put()
                # Send email to any users @replied to
                # NOTE(review): the pattern is a non-raw string ('\@(\w+)') and
                # re.search only finds the FIRST mention — confirm multiple
                # @replies are intentionally ignored.
                usernames = re.search('\@(\w+)', comment.body)
                if usernames is not None:
                    username = usernames.group(0).split('@')[1]
                    # Fetch user from username and send email message
                    replied_to = User.query(User.username == username).fetch(1)
                    if replied_to:
                        replied_to = replied_to[0]
                        short_name = replied_to.first_name or ''
                        full_name = replied_to.full_name
                        commenter_image_url = commenter.profile_image
                        # Uses Email model to queue email and prevent spam
                        email = Email.create(
                            to_address=replied_to.email,
                            subject="Someone replied to you on Mindset Kit",
                            template="comment_reply_notification.html",
                            template_data={
                                'short_name': short_name,
                                'full_name': full_name,
                                'commenter_name': commenter.full_name,
                                'commenter_image_url': commenter_image_url,
                                'content_name': content.name,
                                'comment_body': comment.body,
                                'content_url': content_url,
                                'domain': os.environ['HOSTING_DOMAIN']
                            },
                        )
                        email.put()
        if comment.lesson_id:
            lesson = Lesson.get_by_id(comment.lesson_id)
            if lesson is not None:
                lesson.num_comments += 1
                lesson.put()
                content = lesson
                # Get first url for lesson for emailing
                topics = Topic.get_by_id(lesson.topics)
                theme = [theme for topic in topics for theme in topic.themes][0]
                lesson_theme = Theme.get_by_id(theme)
                content_url = '/' + lesson_theme.short_uid + '/' + topics[0].short_uid + '/' + lesson.short_uid
        # Email interested team members that a comment has been created
        # NOTE(review): if the referenced practice/lesson was not found,
        # content is still None here and the template receives it as-is.
        mandrill.send(
            to_address=config.comment_recipients,
            subject="New Comment on Mindset Kit!",
            template="comment_notification.html",
            template_data={'comment': comment,
                           'user': comment.get_parent_user(),
                           'content': content,
                           'content_url': content_url,
                           'domain': os.environ['HOSTING_DOMAIN']},
        )
        logging.info('model.Comment queueing an email to: {}'
                     .format(config.comment_recipients))
        return comment

    @classmethod
    def convert_uid(klass, short_or_long_uid):
        """Changes long-form uid's to short ones, and vice versa.
        Overrides method provided in Model.
        Long form example: Practice_Pb4g9gus.User_oha4tp8a
        Short form example: Pb4g9gusoha4tp8a
        """
        if '.' in short_or_long_uid:
            parts = short_or_long_uid.split('.')
            return ''.join([x.split('_')[1] for x in parts])
        else:
            return 'Comment_{}.User_{}'.format(
                short_or_long_uid[:8], short_or_long_uid[8:])

    def parent_user_id(self):
        # Comments are always children of a User entity; expose its id.
        return self.key.parent().id()

    def get_parent_user(self):
        # Fetch the owning User entity from the datastore.
        return self.key.parent().get()
from scipy.stats import uniform
import numpy as np
class mhmcmc:
    """Metropolis-Hastings Markov chain Monte Carlo sampler.

    Draws n_MC samples of a dim-dimensional parameter from the posterior
    implied by a prior, a forward model and a likelihood on the model output.
    """

    def __init__(self,_func_prior_pdf, _func_prior_gen, _func_likelihood,_func_model_,
                 _func_kernel_gen, n_MC, dim,**kwargs):
        """Store the problem definition.

        :param _func_prior_pdf: callable x -> prior density (or log-density)
        :param _func_prior_gen: callable () -> a sample from the prior
        :param _func_likelihood: callable y -> likelihood (or log-likelihood)
            of a model output
        :param _func_model_: forward model, callable x -> y (numpy array)
        :param _func_kernel_gen: proposal kernel, callable x -> new candidate
        :param n_MC: number of MCMC steps
        :param dim: dimension of the sampled parameter
        :keyword loglikelihood: if True, prior_pdf/likelihood are treated as
            log-densities (default False)
        :keyword _func_kernelRatio: optional kernel correction ratio for
            asymmetric proposals (defaults to the constant 1 ratio)
        """
        self.n_MC = n_MC
        self.dim = dim
        # Chain of accepted samples; row 0 is the initial state.
        self.x_MCMC = np.zeros (shape = (n_MC, dim))
        self.prior_pdf = _func_prior_pdf
        self.prior_gen = _func_prior_gen
        self.likelihood = _func_likelihood
        self.forward_model = _func_model_
        self.kernel_gen = _func_kernel_gen
        if 'loglikelihood' in kwargs:
            self.loglikelihood = kwargs['loglikelihood']
        else:
            self.loglikelihood = False
        if '_func_kernelRatio' in kwargs:
            self.kernelRatio = kwargs['_func_kernelRatio']
        else:
            self.kernelRatio = self.unitRatio

    def unitRatio(self, xi,xj):
        # Default kernel ratio for symmetric proposal kernels.
        return 1.

    def run_MCMC(self, **kwargs): # kwargs: keep_rejected_point
        """Run the chain and return the accepted samples.

        :keyword x_int: optional initial state (otherwise drawn from prior)
        :keyword keep_rejected_point: if present, also record every proposed
            (possibly rejected) sample in self.x_computed / self.y_computed
        :return: x_MCMC (and x_computed when keep_rejected_point is given)
        """
        if 'x_int' in kwargs:
            # NOTE(review): this branch only fills x_MCMC[0,:]; the local
            # variable x stays unbound, so forward_model(x) below raises
            # NameError when x_int is supplied — confirm x should be set here.
            self.x_MCMC[0,:] = kwargs['x_int']
        else:
            x = self.prior_gen()
            self.x_MCMC[0,:] = x
        print('initial value: ', self.x_MCMC[0,:])
        y = self.forward_model(x)
        # Output storage is sized from the first model evaluation.
        self.y_MCMC = np.zeros (shape = (self.n_MC, y.size))
        self.y_MCMC[0,:] = y
        pdf_x_prev = self.prior_pdf (x)
        pdf_epsilon_prev = self.likelihood (y)
        pdf_x_curr = 0.
        pdf_epsilon_curr = 0.
        # Counts consecutive rejections; used to restart a stuck chain.
        number_repeated_sample = 0
        if 'keep_rejected_point' in kwargs:
            self.x_computed = np.zeros (shape = (self.n_MC, self.dim))
            self.x_computed [0,:] = x
            self.y_computed = np.zeros (shape = (self.n_MC, y.size))
            self.y_computed [0,:] = y
        for i in range (1,self.n_MC):
            if ((number_repeated_sample >= 100)): # start the chain again if no update for 100 steps
                print ("MCMC is repeated form step ", i-number_repeated_sample, ' to step ', i,
                       " with value: ", self.x_MCMC[i-1,:])
                print (pdf_x_curr, pdf_epsilon_curr, successful_update, ratio, x)
                # Draw fresh prior samples until one has non-negligible
                # likelihood (threshold depends on log vs plain likelihood).
                x = self.prior_gen ()
                y = self.forward_model(x)
                if self.loglikelihood:
                    while self.likelihood(y) < - 1e15:
                        x = self.prior_gen ()
                        y = self.forward_model(x)
                else:
                    while self.likelihood(y) < 1.e-15:
                        x = self.prior_gen ()
                        y = self.forward_model(x)
                number_repeated_sample = 0
            # NOTE(review): the counter was just reset above, so this branch
            # also runs right after a restart and overwrites the restart
            # sample with a kernel proposal — confirm this is intended.
            if ((number_repeated_sample < 100)): # Generate new samples
                x = self.kernel_gen(self.x_MCMC[i-1,:])
                y = self.forward_model(x)
            if 'keep_rejected_point' in kwargs:
                self.x_computed[i] = x
                self.y_computed[i] = y
            pdf_x_curr = self.prior_pdf (x)
            pdf_epsilon_curr = self.likelihood (y)
            # Accept or refuse the new sample (Metropolis-Hastings rule).
            temp = uniform.rvs(size = 1)
            if self.loglikelihood:
                # Log domain: acceptance ratio is a difference of log densities.
                ratio = (pdf_x_curr + pdf_epsilon_curr) - (pdf_x_prev + pdf_epsilon_prev)
                successful_update = (np.log(temp) <= ratio)
            else:
                if (pdf_x_prev*pdf_epsilon_prev):
                    kernelRatio = self.kernelRatio(self.x_MCMC[i-1,:],x)
                    ratio = (pdf_x_curr*pdf_epsilon_curr)/(pdf_x_prev*pdf_epsilon_prev)
                    ratio = ratio*kernelRatio
                else:
                    # Previous density was zero: always accept the proposal.
                    ratio =1.
                successful_update = (temp <= ratio)
            if successful_update:
                self.x_MCMC[i,:] = x
                self.y_MCMC[i,:] = y
                number_repeated_sample = 0
                pdf_x_prev = pdf_x_curr
                pdf_epsilon_prev = pdf_epsilon_curr
            else:
                # Rejected: repeat the previous state in the chain.
                self.x_MCMC [i,:] = self.x_MCMC[i-1,:]
                self.y_MCMC [i,:] = self.y_MCMC[i-1,:]
                number_repeated_sample = number_repeated_sample +1
            if i%500 == 0:
                print ("MCMC current step:", i )
                print ("mean value", [self.x_MCMC[0:i,j].mean() for j in range(0,self.dim)] )
        ###############################################################################
        ## Output
        ###############################################################################
        if self.dim == 1:
            # Flatten 1-D chains to plain vectors for convenience.
            self.x_MCMC = self.x_MCMC.reshape((self.x_MCMC.size,))
            self.y_MCMC = self.y_MCMC.reshape((self.y_MCMC.size,))
        if 'keep_rejected_point' in kwargs:
            return self.x_MCMC, self.x_computed #, self.std_converge_flag
        else:
            return self.x_MCMC,
|
import unittest
from image_searcher.interfaces.image_loader import ImageLoader
class TestLoader(unittest.TestCase):
    """Smoke test for ImageLoader.search_tree on the bundled test data."""

    def setUp(self):
        # Point the loader at the repository's sample image directory.
        self.loader = ImageLoader(image_dir_path="./tests/test_data")

    def test_loader(self):
        found = self.loader.search_tree()
        self.assertIsInstance(found, list)
        self.assertTrue(len(found) > 5)
|
#!/usr/bin/env python
import sys
import os
import rospy
import time
import rospkg
import unittest
# Locate the mission_control package and make its src/ directory importable
# before pulling in the modules under test.
rospack = rospkg.RosPack()
mission_control_path = rospack.get_path('mission_control')
sys.path.append("%s/src" % mission_control_path)
import behaviour
from mission_control_utils_cache import Cache
from std_msgs.msg import String
# A ROS node must exist before any subscribers can be created in the tests.
rospy.init_node('test_behaviour_on_token_passing', anonymous=True)
class TestBehaviourVariables(unittest.TestCase):
    """Checks that behaviour variables published on ROS topics reach the
    expected values; each test subscribes and polls until a message arrives
    or a 60 s timeout expires."""
    # Last received value and a received-flag per observed variable.
    test_counter6_foo = None
    test_counter6_foo_got = False
    test_counter6_bar = None
    test_counter6_bar_got = False
    test_counter3_foo = None
    test_counter3_foo_got = False
    test_default = None
    test_default_got = False

    def counter6_foo_callback(self, data):
        self.test_counter6_foo = data.data
        self.test_counter6_foo_got = True

    def counter6_bar_callback(self, data):
        self.test_counter6_bar = data.data
        self.test_counter6_bar_got = True

    def counter3_foo_callback(self, data):
        self.test_counter3_foo = data.data
        self.test_counter3_foo_got = True

    def default_callback(self, data):
        self.test_default = data.data
        self.test_default_got = True

    def test_variable_counter6_foo(self):
        rospy.Subscriber("/mission_control/test/variable/test_counter6_foo", String, self.counter6_foo_callback)
        # Poll until the callback fired, ROS shuts down, or 60 s elapse.
        timeout_t = time.time() + 60.0
        while not rospy.is_shutdown() and not self.test_counter6_foo_got and time.time() < timeout_t:
            time.sleep(0.1)
        self.assertTrue(self.test_counter6_foo == "10")

    def test_variable_counter6_bar(self):
        rospy.Subscriber("/mission_control/test/variable/test_counter6_bar", String, self.counter6_bar_callback)
        timeout_t = time.time() + 60.0
        while not rospy.is_shutdown() and not self.test_counter6_bar_got and time.time() < timeout_t:
            time.sleep(0.1)
        self.assertTrue(self.test_counter6_bar == "100")

    def test_variable_counter3_foo(self):
        rospy.Subscriber("/mission_control/test/variable/test_counter3_foo", String, self.counter3_foo_callback)
        timeout_t = time.time() + 60.0
        while not rospy.is_shutdown() and not self.test_counter3_foo_got and time.time() < timeout_t:
            time.sleep(0.1)
        self.assertTrue(self.test_counter3_foo == "40")

    def test_variable_default(self):
        rospy.Subscriber("/mission_control/test/variable/test_default", String, self.default_callback)
        timeout_t = time.time() + 60.0
        while not rospy.is_shutdown() and not self.test_default_got and time.time() < timeout_t:
            time.sleep(0.1)
        self.assertTrue(self.test_default == "777")
# Run the test case through rostest when executed directly.
if __name__ == '__main__':
    import rostest
    rostest.rosrun('mission_control', 'test_behaviour_token_passing', TestBehaviourVariables)
|
"""Goal: Best set of guesses for the optimal attacker against a relaxed checker.
The best guesses are computed using greedy algorithm (and iterated greedy (TODO)
algoritm).
Returns: a file containing the list of guesses, and the guessing probability for
q=5,10,100,1000, q (user input)
"""
import json, os, sys, gc, random
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, base_dir)
import numpy as np
import time, datetime
from hashlib import md5
from collections import OrderedDict, defaultdict
from heap import priority_dict
import warnings
import csv
import joblib
from multiprocessing import Pool
# Real passwords already covered by an earlier guess's ball; consulted by
# power() so no password's probability is counted twice across guesses.
rpw_done = set()  # to refrain from reintroducing a ball
global_attacker_pwmodel = None  # Only used in power
global_typofixer = None  # Only used in power
def power(tpw):
    """Negated attacker-probability mass of the passwords newly covered by
    guessing ``tpw``: its typo-correction ball plus ``tpw`` itself, minus
    passwords already claimed in ``rpw_done``.  Negated so a min-heap keyed
    on this value pops the heaviest ball first.
    """
    global global_typofixer, global_attacker_pwmodel, rpw_done
    covered = (global_typofixer.check(tpw) | {tpw}) - rpw_done
    return -sum(map(global_attacker_pwmodel.prob, covered))
def greedy_maxcoverage_heap(attacker_pwmodel, typofixer, q=100):
    """
    Creates a list of q best guesses.

    Greedy max-coverage: repeatedly compares the heaviest pending "ball"
    (the set of real passwords a typo-corrected guess would cover, kept in
    a min-heap of negated weights) against an optimistic estimate for the
    next unseen password, emitting guesses until q have been produced.

    :param attacker_pwmodel: password model exposing prob() and iterpasswords()
    :param typofixer: checker exposing check(), get_nh(), max_ball_size and
        max_nh_size
    :param q: number of guesses to generate
    :return: dict with 'typo_guesslist', 'normal_guesslist' and the string
        forms of the attacker model and typo fixer used
    """
    print("Guessing for DETERMINISTIC typo correction")
    global rpw_done, global_typofixer, global_attacker_pwmodel
    global_typofixer = typofixer
    global_attacker_pwmodel = attacker_pwmodel
    subset_heap = priority_dict()
    b = typofixer.max_ball_size  # ball size
    n = typofixer.max_nh_size  # neighborhood size
    pwlist = attacker_pwmodel.iterpasswords()
    guess_list = []
    tpw_done = set()  # guesses already emitted; their probability is spent
    l = 0
    estimated_ball_weight = 0.0
    normal_guesses = []
    # Defensive initialization: tpw/weight were previously first assigned
    # only inside the "if subset_heap:" branch, so the re-insertion guard
    # below could hit a NameError on the first iteration (empty heap).
    tpw, weight = None, 0.0

    def totp(l):
        return sum(attacker_pwmodel.prob(pw) for pw in l)

    while len(guess_list) < q:
        rpw, _ = next(pwlist)
        if len(rpw) < 6:  # ignore very short passwords
            continue
        if len(normal_guesses) < q:
            normal_guesses.append(rpw)
        p = attacker_pwmodel.prob(rpw)
        if estimated_ball_weight <= 0:
            # The weight of the heaviest ball in rpw's neighbor
            estimated_ball_weight = p * b()
        # if subset heap is not empty, take out the heaviest ball in it
        if subset_heap:
            [tpw, weight] = subset_heap.pop_smallest()
            weight = -weight  # heap stores negated weights
            if weight <= 0:
                print("You have exhausted all the options")
                break
            # As long as the popped ball is at least as heavy as anything
            # reachable from rpw, commit it as the next guess.
            while weight >= b() * p > 0 and len(guess_list) < q:
                print("Guess {:04d}: {!r}, weight={}, new_ball={}, actual-cover={}" \
                      .format(len(guess_list), tpw, weight,
                              list((typofixer.check(tpw) | set([tpw])) - rpw_done)[:10],
                              power(tpw)))
                guess_list.append(tpw)
                tpw_done.add(tpw)
                new_killed = (typofixer.check(tpw) | set([tpw])) - rpw_done
                rpw_done |= new_killed
                for rrpw in new_killed:  # kill all these passwords
                    tp = attacker_pwmodel.prob(rrpw)  # get its probability
                    # inform all its neighbors, rrpw is not in the nh of rrpw,
                    for ttpw in typofixer.get_nh(rrpw) | set([rrpw]):
                        if ttpw in subset_heap and typofixer.check(ttpw, rrpw):
                            subset_heap[ttpw] += tp
                            if subset_heap[ttpw] >= 0:
                                del subset_heap[ttpw]
                if subset_heap:
                    tpw, weight = subset_heap.pop_smallest()
                    weight = -weight
                else:
                    break
            if tpw and weight > 0 and tpw not in subset_heap and tpw not in tpw_done:
                # put the still-unused heaviest ball back on the heap
                subset_heap[tpw] = -weight
        # Insert the neighbors of this rpw
        # all the neighbors including itself
        # if it is not already dead or in the heap
        all_nhs = [ttpw for ttpw in typofixer.get_nh(rpw) | set([rpw])
                   if (ttpw not in subset_heap) and (ttpw not in tpw_done)]
        with joblib.Parallel(n_jobs=7) as parallel:
            weights = parallel(
                joblib.delayed(power)(ttpw)
                for ttpw in all_nhs
            )
        for ttpw, pwr in zip(all_nhs, weights):
            subset_heap[ttpw] = pwr  # already negated by power()
        if len(subset_heap) > l:
            print("Heap size: {0}".format(len(subset_heap)))
            l = len(subset_heap) * 2
    return {
        'typo_guesslist': guess_list,
        'normal_guesslist': normal_guesses,
        'attacker_model': str(attacker_pwmodel),
        'typofixer': str(typofixer)
    }
################################################################################
from pwmodel import HistPw
from typofixer.checker import Checker, BUILT_IN_CHECKERS
OUTPUT_f = "guesslist-{}.json"
def compute_guesses_and_success_rate(checker, q, passwordFile, attackerFile):
    """
    Compute (or load from cache) the q best typo-aware guesses for
    `checker` and print the success-probability gain over a typo-oblivious
    attacker for q in {10, 100, 1000}.

    :param checker: key into BUILT_IN_CHECKERS
    :param q: number of guesses to generate
    :param passwordFile: real (defender) password distribution file
    :param attackerFile: attacker's password distribution file
    """
    q = int(q)
    pwm = HistPw(passwordFile)
    ######################## Parameters #######################
    attacker_pwmodel = HistPw(os.path.expanduser(attackerFile))
    typofixer = BUILT_IN_CHECKERS[checker]
    params = '{}-{}'.format(checker, q)  # attacker's pw dist is always rockyou
    ############################################################
    global OUTPUT_f
    OUTPUT_f = OUTPUT_f.format(params)
    if os.path.exists(OUTPUT_f):
        # Cached result from an earlier run; context manager closes the
        # handle promptly (the old json.load(open(...)) leaked it).
        with open(OUTPUT_f) as inf:
            A = json.load(inf)
    else:
        A = greedy_maxcoverage_heap(attacker_pwmodel, typofixer, q=q)
        # OUTPUT_f is already fully formatted above; the old code called
        # .format(params) on it a second time, which was a no-op.
        with open(OUTPUT_f, 'w') as logf:
            json.dump(A, logf, indent=2)
    typo_guesses = A['typo_guesslist']
    normal_guesses = A['normal_guesslist']
    print("\n{:*^60}".format("Security loss"))
    print("{:>5s}, {:>9s}, {:>13s}, {:>8s}".format("q", "lambda_q", "lambda_t_q", "secloss"))

    def totprob(l):
        # Total real-distribution probability of a list of passwords.
        return sum(pwm.prob(pw) for pw in l)

    for tq in [10, 100, 1000]:
        if tq > q:
            continue
        # Union of all real passwords covered by the first tq typo-guesses
        ball = set()
        for guess in typo_guesses[:tq]:
            ball.update(typofixer.check(guess))
        lambda_q = totprob(normal_guesses[:tq])
        lambda_tilde_q = totprob(ball)
        print("{:5d}, {:-9.5f}, {:-13.5f}, {:-8.5f}" \
              .format(tq, lambda_q, lambda_tilde_q, (lambda_tilde_q - lambda_q)))
def test_success_rate(real_pwm_f):
    """Sanity-check the guess list stored in 'coverage.log' against the
    password distribution in `real_pwm_f`, printing lambda_q,
    lambda-tilde_q and the security loss for q in {10, 100, 1000}.

    NOTE(review): the attacker file, 'coverage.log' and the 'ChkBl_Top3'
    checker are hard-coded; this looks like a manual diagnostic, not part
    of the main pipeline — confirm before relying on it.
    """
    pwm = HistPw(real_pwm_f)
    attacker_pwmodel = HistPw(os.path.expanduser('~/passwords/rockyou-withcount.txt.bz2'))
    Q = [10, 100, 1000]
    typofixer = BUILT_IN_CHECKERS['ChkBl_Top3']
    tpw_data = json.load(open('coverage.log'))['guesslist'][:max(Q)]
    tpwlist, tpwp_ = zip(*tpw_data)
    print("Num guesses: {}, Total prob covered: {}".format(len(tpwp_), sum(tpwp_)))
    def totprob(l):
        # Total real-distribution probability of a list of passwords.
        return sum(pwm.prob(pw) for pw in l)
    # Typo-oblivious attacker: top passwords of length >= 6, same count as tpwlist
    normal_guesses = [pw for pw, c in attacker_pwmodel.iterpasswords(n=len(tpwlist)*2) if len(pw)>=6][:len(tpwlist)]
    print(real_pwm_f)
    ######################################### Debug ##############################
    # assert len(normal_guesses) == len(tpwlist)
    # done = set()
    # nwp, tpwp = 0, 0
    # for i, w in enumerate(normal_guesses):
    # tpw = tpwlist[i]
    # killing = (typofixer.get_ball(tpw) | set([tpw])) - done
    # done |= killing
    # weight = totprob(killing)
    # nwp += pwm.prob(w)
    # tpwp += weight
    # if tpwp < nwp:
    # print "{!r},{!r},{}<-->{}".format(w, tpw, pwm.prob(w), weight)
    ###########################################################################
    # tpwp = pwm.prob_correction(tpwp)
    # nwp = pwm.prob_correction(nwp)
    # print tpwp, nwp, tpwp - nwp
    for q in Q:
        # All real passwords covered by the first q typo-aware guesses
        ball = typofixer.get_ball_union(tpwlist[:q])
        print(set(normal_guesses[:q]) - ball)
        lambda_q = totprob(normal_guesses[:q])
        lambda_tilde_q = totprob(ball)
        print("{:.5f}, {:.5f}, {:.5f}" \
              .format(lambda_q, lambda_tilde_q, (lambda_tilde_q - lambda_q)))
if __name__ == "__main__":
    # Expects 4 positional arguments: checker name, q, real-password file,
    # attacker-password file.
    # BUGFIX: the old check was len(sys.argv) < 4 even though sys.argv[4]
    # (attackerFile) is read below, so running with exactly three arguments
    # crashed with IndexError instead of printing usage; the usage text also
    # said "3 things".
    if len(sys.argv) < 5:
        print("""
        You need to provide 4 things: a checker, a value of 'q', a filename for the real password distribution, and a filename for the attacker's password distribution
        e.g.: $ python {} ChkBl_Top3 10 ~/passwords/rockyou-withcount.txt.bz2 ~/passwords/rockyou-withcount.txt.bz2\n""".format(__file__))
        exit(1)
    else:
        chker = sys.argv[1]
        q = int(sys.argv[2])
        passwordFile = sys.argv[3]
        attackerFile = sys.argv[4]
        if chker not in BUILT_IN_CHECKERS:
            print("Your checker ({}) is not in my list. Please use one of the following.".format(chker))
            print(BUILT_IN_CHECKERS.keys())
            exit(2)
        compute_guesses_and_success_rate(chker, q, passwordFile, attackerFile)
# -*- coding: utf-8 -*-
import re
from shared_visualize import visualize_procedure,VISUALIZE
from collections import OrderedDict
from math import pi
MAKE_ANIM = True
# Regexes over the sweep solver's debug output.
# BUGFIX: made these raw strings — "\." inside a plain string literal is an
# invalid string escape (DeprecationWarning, SyntaxWarning on Python 3.12+);
# raw strings pass the backslash through to the regex engine as intended.
included_re = re.compile(r"Included n([0-9]+) to route set ([0-9]+)")
rays_re = re.compile(r"Considering n([0-9]+) between rays (-?[0-9]+\.[0-9]+), (-?[0-9]+\.[0-9]+)")
added_re = re.compile(r"Added n([0-9]+) to the route set")
tsp_re = re.compile(r"Got TSP solution (\[.*?\]) \((-?[0-9]+\.[0-9]+)\)")
route_set_re = re.compile(r"Route set .*?(\[.*?\]) full")
L_violation_re = re.compile(r"L constraint was violated, removed n([0-9]+) from the route set")
# global state
ray_cache = OrderedDict()
def _process_debug_line(line, normalization_parameters, currentK,
                        #output here
                        rays, active_nodes, active_ray_idxs,
                        points_of_interest,
                        candidate_routes, infeasible_routes, complete_routes,
                        labels):
    """Parse one line of the sweep heuristic's debug log and update the
    visualization state in place (the list parameters are mutated).

    Returns (changed, newK): `changed` tells the caller whether this line
    warrants a redraw; `newK` is 0 when a new sweep starts, else None.

    NOTE(review): `normalization_parameters`, `currentK`,
    `points_of_interest` and `labels` are accepted but never used here —
    presumably part of the shared callback signature; confirm against
    shared_visualize.visualize_procedure.
    """
    global ray_cache
    if "Do a sweep from" in line:
        # start a new sweep
        ray_cache = OrderedDict()
        return True, 0
    rays_match = rays_re.search(line)
    if rays_match:
        # NOTE(review): the node id is parsed as float here but as int in the
        # "Added" branch below; the keys compare/hash equal (5.0 == 5) so the
        # ray_cache lookups still work, but the inconsistency looks accidental.
        node = float( rays_match.group(1) )
        l = float( rays_match.group(2) )
        r = float( rays_match.group(3) )
        ray_cache[node] = r
        if len(rays)==0:
            if (l>r):
                # unwrap so the opening ray angle sits below the closing one
                l-=2*pi
            rays.append(l)
            active_ray_idxs.append(0)
            # first ray, show
            return True, None
        else:
            # do not update, because this node might not belong to the current group
            return False, None
    added_match = added_re.search(line)
    if added_match:
        node = int( added_match.group(1) )
        active_nodes[:] = []
        active_nodes.append(node)
        # Advance the sweep's trailing ray to this node's cached angle:
        # append a new ray if the last one is already "committed", else overwrite.
        if active_ray_idxs[-1]==len(rays)-1:
            rays.append(ray_cache[node])
        else:
            rays[-1] = ray_cache[node]
        return True, None
    rs_match = route_set_re.search(line)
    if rs_match:
        # Route set became full: highlight its nodes and commit the current ray.
        # NOTE(review): eval() of log text — acceptable for trusted debug
        # output only; never feed untrusted logs through this.
        active_nodes[:] = eval( rs_match.group(1) )
        active_ray_idxs.append(len(rays)-1)
        return True, None
    tsp_match = tsp_re.search(line)
    if tsp_match:
        #rays[-1] = ray_cache[ray_cache.keys()[-1]]
        active_nodes[:] = []
        infeasible_routes[:] = []
        route = eval(tsp_match.group(1))
        candidate_routes.append(route)
        return True, None
    L_violation_match = L_violation_re.search(line)
    if L_violation_match and len(candidate_routes)>0:
        # Last candidate route broke the route-length (L) constraint:
        # demote it to infeasible and rewind the sweep one node back.
        node_to_remove = int(L_violation_match.group(1))
        active_nodes[:] = [node_to_remove]
        violating_route = candidate_routes[-1]
        del candidate_routes[-1]
        infeasible_routes.append(violating_route)
        # sweeps back
        ray_to_prev = False
        for node, ray in reversed(ray_cache.items()):
            if ray_to_prev:
                rays[-1] = ray
                break
            if node == node_to_remove:
                ray_to_prev = True
        return True, None
    if "Route completed" in line:
        complete_route = eval( line[line.find('['):] )
        complete_routes.append( complete_route )
        candidate_routes[:] = []
        infeasible_routes[:] = []
        active_ray_idxs.append( len(rays)-1 )
        return True, None
    #print "VSW:", line,
    #print "VSW:", changed, newK
    return False, None
if __name__=="__main__":
    # Replay the sweep algorithm's debug log through the shared visualizer,
    # feeding every line to _process_debug_line (and optionally animating).
    visualize_procedure("sweep", selector=VISUALIZE.ALL, make_anim=MAKE_ANIM,
                        process_debug_line_callback = _process_debug_line)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019 Ryan L. Collins <rlcollins@g.harvard.edu>
# and the Talkowski Laboratory
# Distributed under terms of the MIT license.
"""
Count & weight CNVs overlapping genes
"""
import pybedtools as pbt
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
import argparse
from sys import stdout
from os import path
import subprocess
import gzip
def process_cnvs(bedpath, pad_controls, case_hpos, control_hpo, max_size=None):
    """
    Read CNVs & extend control CNV breakpoints by a specified distance

    Parameters
    ----------
    bedpath : str
        CNV BED file; column 6 (index 5) holds the sample phenotype(s).
    pad_controls : int or float
        Distance added to both breakpoints of control CNVs.
    case_hpos : list
        HPO terms defining cases; 'KEEP_ALL_SAMPLES' disables phenotype filtering.
    control_hpo : str
        Phenotype label identifying control CNVs.
    max_size : int, optional
        If set, CNVs longer than this are dropped.

    Returns
    -------
    pybedtools.BedTool
        Padded & phenotype-filtered CNVs (materialized via saveas()).
    """
    cnvbt = pbt.BedTool(bedpath)
    def _pad_control_cnv(feature, pad_controls, control_hpo):
        """
        Add a fixed distance to control CNV breakpoints
        """
        # Only pad features whose phenotype column equals the control label
        if feature[5] == control_hpo:
            feature.start = max([0, feature.start-pad_controls])
            feature.stop = feature.stop+pad_controls
        return feature
    cnvbt = cnvbt.each(_pad_control_cnv, pad_controls, control_hpo)
    # Filter on max size, if optioned
    if max_size is not None:
        cnvbt = cnvbt.filter(lambda x: x.length <= max_size)
    def _filter_phenos(feature, case_hpos, control_hpo):
        """
        Filter CNVs based on phenotype
        """
        if 'KEEP_ALL_SAMPLES' in case_hpos:
            # Keep everything if no case hpo is specified
            return True
        else:
            # Keep the CNV if it shares at least one term with cases or controls
            keep_hpos = case_hpos + [control_hpo]
            cnv_hpos = feature[5].split(';')
            if len(set(keep_hpos).intersection(set(cnv_hpos))) > 0:
                return True
            else:
                return False
    # saveas() materializes the lazy stream so the BedTool can be reused
    cnvbt = cnvbt.filter(_filter_phenos, case_hpos, control_hpo).saveas()
    return cnvbt
def process_gtf(gtf_in, bl_list, xcov=0.3):
    """
    Read gtf, format entries, and compute various metadata

    :param gtf_in: path to input GTF
    :param bl_list: list of blacklist BED paths (or None to skip)
    :param xcov: max fraction of a transcript covered by any blacklist
        before the gene is excluded
    :return: (gtfbt, txbt, exonbt, genes, transcripts, cds_dict)
    """
    gtfbt = pbt.BedTool(gtf_in)
    # Build lists of eligible gene names and transcript IDs
    genes, transcripts = [], []
    for f in gtfbt:
        if f.fields[2] == 'transcript':
            gname = f.attrs['gene_name']
            tname = f.attrs['transcript_id']
            if gname not in genes:
                genes.append(gname)
            if tname not in transcripts:
                transcripts.append(tname)
    # Filter & clean records in gtf
    def _filter_gtf(feature):
        """
        Restrict GTF features to desired elements
        """
        if feature.fields[2] in 'exon transcript'.split() \
        and feature.attrs['gene_name'] in genes \
        and feature.attrs['transcript_id'] in transcripts:
            return True
        else:
            return False
    # Attributes never used downstream; dropping them shrinks the records
    attrs_to_drop = 'gene_id gene_type gene_status transcript_type ' + \
                    'transcript_status transcript_name protein_id ' + \
                    'tag ccdsid havana_gene havana_transcript'
    attrs_to_drop = attrs_to_drop.split()
    def _clean_feature(feature):
        """
        Clean unnecessary fields & info from GTF features
        """
        for key in attrs_to_drop:
            if key in feature.attrs.keys():
                feature.attrs.pop(key)
        return feature
    gtfbt = gtfbt.filter(_filter_gtf).filter(_clean_feature).saveas()
    def _blacklist_genes(bt, bl, xcov):
        """
        Remove genes based on overlap with blacklist
        """
        # coverage() appends the covered fraction as the record's last field
        txcov = bt.coverage(bl).filter(lambda x: x[2] == 'transcript')
        keepers = [x.attrs['gene_name'] for x in txcov if float(x.fields[-1]) < xcov]
        return bt.filter(lambda x: x.attrs['gene_name'] in keepers)
    # Filter genes based on blacklist overlap
    if bl_list is not None:
        for bl in bl_list:
            gtfbt = _blacklist_genes(gtfbt, bl, xcov).saveas()
        keepers = list(set([x.attrs['gene_name'] for x in gtfbt]))
        genes = [g for g in genes if g in keepers]
        # NOTE(review): `keepers` holds gene symbols, but this filters the
        # transcript-ID list against it — any blacklist run likely empties
        # `transcripts`. It appears unused downstream, but confirm.
        transcripts = [g for g in transcripts if g in keepers]
    # Build dictionary of cds lengths per gene
    cds_dict = {}
    for e in gtfbt.filter(lambda x: x.fields[2] == 'exon'):
        gname = e.attrs['gene_name']
        if gname not in cds_dict.keys():
            cds_dict[gname] = e.length
        else:
            cds_dict[gname] += e.length
    # Make separate BedTools for exons and transcripts
    txbt = gtfbt.filter(lambda x: x.fields[2] == 'transcript').saveas()
    exonbt = gtfbt.filter(lambda x: x.fields[2] == 'exon').saveas()
    return gtfbt, txbt, exonbt, genes, transcripts, cds_dict
def overlap_cnvs_exons(cnvbt, exonbt, cds_dict, weight_mode, min_cds_ovr, max_genes):
    """
    Compute fraction of CDS overlapped per gene for each CNV

    :param cnvbt: CNV BedTool (column 4 = CNV id)
    :param exonbt: exon BedTool derived from the GTF
    :param cds_dict: total CDS length (bp) per gene symbol
    :param weight_mode: None (raw), 'weak', or 'strong' gene weighting
        ('bayesian' is rescaled later in get_bayes_weights)
    :param min_cds_ovr: min fraction of a gene's CDS a CNV must cover to count
    :param max_genes: CNVs overlapping more than this many genes are excluded
    :return: (raw_counts, weighted_counts, cnv_weights)
    """
    cnvs_per_gene = {}
    cnv_cds_sums = {}
    for i in cnvbt.intersect(exonbt, wo=True):
        # Get basic feature-intersection info
        cnvid = i[3]
        # NOTE(review): i[14] is taken to be the exon's GTF attributes column,
        # which assumes a 6-column CNV BED (6 CNV fields + 9 GTF fields) —
        # confirm if the CNV BED schema ever changes.
        exinfo = i[14].strip(';').split(';')
        exinfo_name = [x for x in exinfo if x.startswith('gene_name ')]
        gene = exinfo_name[0].split(' ')[1].replace('"', '')
        ovrlen = int(i[-1])  # bp of overlap appended by intersect -wo
        # Add CNV ID to list for gene, if necessary
        if gene not in cnvs_per_gene.keys():
            cnvs_per_gene[gene] = []
        if cnvid not in cnvs_per_gene[gene]:
            cnvs_per_gene[gene].append(cnvid)
        # Add bp overlap to list for CNV
        if cnvid not in cnv_cds_sums:
            cnv_cds_sums[cnvid] = {}
        if gene not in cnv_cds_sums[cnvid].keys():
            cnv_cds_sums[cnvid][gene] = ovrlen
        else:
            cnv_cds_sums[cnvid][gene] += ovrlen
    # Scale cds overlap as fraction of total gene per CNV
    cnv_weights = {}
    for cnvid in cnv_cds_sums.keys():
        if cnvid not in cnv_weights.keys():
            cnv_weights[cnvid] = {}
        for gene, ovrbp in cnv_cds_sums[cnvid].items():
            cds_ovr = ovrbp / cds_dict[gene]
            if cds_ovr >= min_cds_ovr:
                cnv_weights[cnvid][gene] = cds_ovr
            # If overlap is less than required, remove cnv from cnvs_per_gene
            else:
                cnvs_per_gene[gene] = [c for c in cnvs_per_gene[gene] if c != cnvid]
    # Weight CNVs
    for cnvid in cnv_cds_sums.keys():
        if len(cnv_weights[cnvid]) > 0:
            wsum = sum(cnv_weights[cnvid].values())
            # Skip to next CNV if no weighting is specified
            if weight_mode is None:
                continue
            # Otherwise, weight genes by CDS overlap
            if weight_mode == 'weak' \
            or wsum < 1:
                cnv_weights[cnvid] = {gene: w / wsum for gene, w
                                      in cnv_weights[cnvid].items()}
            elif weight_mode == 'strong':
                cnv_weights[cnvid] = {gene: w / (2 ** (wsum - 1)) for gene, w
                                      in cnv_weights[cnvid].items()}
    # Exclude CNVs based on overlapping more than max_genes
    cnvs_to_exclude = [cid for cid in cnv_cds_sums.keys() \
                       if len(cnv_weights.get(cid, [])) > max_genes]
    # Collapse counts per gene
    raw_counts = {gene: len([x for x in cnvs if x not in cnvs_to_exclude]) \
                  for gene, cnvs in cnvs_per_gene.items()}
    weighted_counts = {}
    for cnv, weights in cnv_weights.items():
        if len(weights) > 0 and cnv not in cnvs_to_exclude:
            for gene, w in weights.items():
                if gene not in weighted_counts.keys():
                    weighted_counts[gene] = w
                else:
                    weighted_counts[gene] += w
    return raw_counts, weighted_counts, cnv_weights
def get_bayes_weights(case_cnvbt, case_cnv_weights, control_cnvbt, control_cnv_weights):
    """
    Compute weighted pseudocounts as a function of size & number of genes

    Fits a case-vs-control logistic regression on CNV length and total gene
    weight per CNV, then rescales each CNV's per-gene weights by a
    pseudocount derived from the predicted case probability (normalized so
    the median pseudocount is 1 within cases and within controls).

    :return: (case_weights, case_cnv_weights, control_weights,
        control_cnv_weights) — summed per-gene weights plus the rescaled
        per-CNV weight dicts (mutated in place and returned).
    """
    # Prepare data: (phenotype, CNV length, summed gene weight, CNV id)
    case_vals = [(1, x.length, np.sum(list(case_cnv_weights.get(x.name, {}).values())), x.name)
                 for x in case_cnvbt]
    control_vals = [(0, x.length, np.sum(list(control_cnv_weights.get(x.name, {}).values())), x.name)
                    for x in control_cnvbt]
    df = pd.DataFrame(case_vals + control_vals, columns='pheno size genes cnvid'.split())
    # Fit logistic regression of phenotype on CNV size & gene burden
    x = df.loc[:, 'size genes'.split()]
    y = df['pheno']
    model = LogisticRegression(solver='saga', random_state=0, penalty='none',
                               max_iter=1000).fit(x, y)
    probs = pd.DataFrame(model.predict_proba(x), columns='p_control p_case'.split())
    df = pd.concat([df, probs], axis=1)

    def _pcase(cnvid):
        # Predicted case probability for one CNV.
        # BUGFIX: the original called np.float(...) on the selection, but
        # np.float was deprecated in NumPy 1.20 and removed in 1.24, which
        # makes this function raise AttributeError on current NumPy. Take
        # the scalar explicitly instead.
        return float(df.loc[df['cnvid'] == cnvid, 'p_case'].iloc[0])

    def _sum_per_gene(cnv_weights):
        # Collapse per-CNV gene weights into a single per-gene total.
        totals = {}
        for weights in cnv_weights.values():
            for gene, w in weights.items():
                totals[gene] = totals.get(gene, 0) + w
        return totals

    # Apply pseudocount weights to genes for each CNV
    # Pseudocounts normalized to median = 1 independently for cases & controls
    case_scalar = np.median(2 * (1 - df.loc[df['pheno'] == 1, 'p_case']))
    for cnvid in case_cnv_weights.keys():
        pcount = (2 * (1 - _pcase(cnvid))) / case_scalar
        for gene, frac in case_cnv_weights[cnvid].items():
            case_cnv_weights[cnvid][gene] = frac * pcount
    control_scalar = np.median(2 * df.loc[df['pheno'] == 0, 'p_case'])
    for cnvid in control_cnv_weights.keys():
        pcount = (2 * _pcase(cnvid)) / control_scalar
        for gene, frac in control_cnv_weights[cnvid].items():
            control_cnv_weights[cnvid][gene] = frac * pcount
    # Sum weights per gene
    case_weights = _sum_per_gene(case_cnv_weights)
    control_weights = _sum_per_gene(control_cnv_weights)
    return case_weights, case_cnv_weights, control_weights, control_cnv_weights
def make_output_table(outbed, txbt, genes, cds_dict, control_counts,
                      control_weights, case_counts, case_weights):
    """
    Format master table of counts per gene and write to outbed

    One row per transcript feature in `txbt`: coordinates, gene symbol,
    CDS length, then raw and weighted CNV counts for controls and cases.
    Missing lookups default to 'NA' (CDS) or 0 (counts/weights).
    """
    columns = ['#chr', 'start', 'end', 'gene', 'cds',
               'control_cnvs', 'control_cnvs_weighted',
               'case_cnvs', 'case_cnvs_weighted']
    outbed.write('\t'.join(columns) + '\n')
    for tx in txbt:
        symbol = tx.attrs['gene_name']
        row = (tx.chrom, tx.start, tx.end, symbol,
               cds_dict.get(symbol, 'NA'),
               control_counts.get(symbol, 0),
               control_weights.get(symbol, 0),
               case_counts.get(symbol, 0),
               case_weights.get(symbol, 0))
        outbed.write('\t'.join(map(str, row)) + '\n')
    # Must close output file to flush buffer (end of chr22 sometimes gets clipped)
    outbed.close()
def write_annotated_cnvs(cnvbt, cnvs_out, case_cnv_weights, control_cnv_weights,
                         control_hpo='HEALTHY_CONTROL', annotate_cds=False,
                         max_float=6):
    """
    Format & write BED file of CNVs annotated with gene overlaps

    Each CNV's per-gene weights are looked up in the case or control dict
    (controls are identified by `control_hpo` in the last column), then the
    gene count, total weight, and a sorted ';'-joined gene list are appended
    to the CNV's fields.  With `annotate_cds`, per-gene CDS fractions
    rounded to `max_float` digits are appended as well.
    """
    # Write header to output file
    header_fields = '#chr start end cnvid cnv phenos ngenes total_weight genes'
    if annotate_cds:
        header_fields += ' cds_per_gene'
    cnvs_out.write('\t'.join(header_fields.split()) + '\n')
    for cnv in cnvbt:
        is_control = (cnv.fields[-1] == control_hpo)
        lookup = control_cnv_weights if is_control else case_cnv_weights
        weights = lookup.get(cnv.name, {})
        gene_names = sorted(weights.keys())
        total = np.nansum(list(weights.values()))
        record = cnv.fields + [str(len(gene_names)), str(total), ';'.join(gene_names)]
        if annotate_cds:
            record.append(';'.join(str(round(weights[g], max_float)) for g in gene_names))
        cnvs_out.write('\t'.join(record) + '\n')
def main():
    """
    Main block

    Parses command-line options, loads & filters the CNV BED and GTF,
    counts/weights CNV-gene overlaps separately for cases and controls,
    and writes the per-gene table (plus, optionally, an annotated CNV BED).
    """
    # Parse command line arguments and options
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('cnvs', help='CNV BED file to compare vs GTF.')
    parser.add_argument('gtf', help='GTF of genes to consider.')
    parser.add_argument('--pad-controls', help='Distance to be added to control ' +
                        'breakpoints. [default: 0]',
                        type=float, default=0)
    parser.add_argument('--weight-mode', help='Specify behavior for distributing ' +
                        'weight for multi-gene CNVs. [default: no weighting]',
                        choices=['weak', 'strong', 'bayesian'], default=None)
    parser.add_argument('--min-cds-ovr', help='Minimum coding sequence overlap ' +
                        'to consider a CNV gene-overlapping. [default: 0.2]',
                        type=float, default=0.2)
    parser.add_argument('--max-cnv-size', help='Maximum CNV size to be included. ' +
                        '[default: No limit]', type=int, default=None)
    parser.add_argument('--max-genes', help='Maximum number of genes overlapped by ' +
                        'a CNV before being the CNV is excluded. [default: 20000]',
                        type=int, default=20000)
    parser.add_argument('--max-genes-in-cases-only', action='store_true',
                        help='Only apply --max-genes to case CNVs. [default: ' +
                        'apply to both case & control CNVs]')
    parser.add_argument('-t', '--type', help='Type of CNV to include (DEL/DUP). ' +
                        '[default: all]')
    parser.add_argument('--hpo', help='HPO term to consider for case samples. ' +
                        'If no --hpo is supplied, will count all CNVs ' +
                        'irrespective of phenotype. Can also provide a semicolon-' +
                        'delimited list of HPOs to consider at once.')
    parser.add_argument('--control-hpo', default='HEALTHY_CONTROL', help='HPO code ' +
                        'to use for control CNV counts. [default: HEALTHY_CONTROL]',
                        dest='control_hpo')
    parser.add_argument('-x', '--blacklist', action='append', help='BED file ' +
                        'containing regions to blacklist based on overlap with ' +
                        'genes. May be specified multiple times.')
    parser.add_argument('--xcov', type=float, help='Maximum coverage ' +
                        'by any blacklist before excluding a gene. [default: 0.3]',
                        default=0.3)
    parser.add_argument('-o', '--outbed', help='Path to output file. ' +
                        '[default: stdout]')
    parser.add_argument('--cnvs-out', help='Path to output BED file for CNVs ' +
                        'annotated with genes disrupted.')
    parser.add_argument('--annotate-cds-per-gene', default=False, action='store_true',
                        help='Append CDS overlapped per gene to CNV BED output file.')
    parser.add_argument('-z', '--bgzip', dest='bgzip', action='store_true',
                        help='Compress output BED with bgzip.')
    parser.add_argument('--bgzip-cnvs-out', dest='bgzip_cnvs_out', action='store_true',
                        help='Compress --cnvs-out BED with bgzip.')
    parser.add_argument('-v', '--verbose', action='store_true', help='Print ' +
                        'diagnostics.')
    args = parser.parse_args()
    # Open connection to output file
    if args.outbed is None \
    or args.outbed in 'stdout -'.split():
        outbed = stdout
    else:
        # Strip any compression suffix; the plain file is bgzipped at the end
        if path.splitext(args.outbed)[-1] in '.gz .bz .bgz .bgzip .gzip'.split():
            outbed_path = path.splitext(args.outbed)[0]
        else:
            outbed_path = args.outbed
        outbed = open(outbed_path, 'w')
    if args.hpo is None:
        case_hpos = ['KEEP_ALL_SAMPLES']
    else:
        case_hpos = args.hpo.split(';')
    # Process input CNV BED file
    cnvbt = process_cnvs(args.cnvs, args.pad_controls, case_hpos, args.control_hpo,
                         args.max_cnv_size)
    if args.type is not None:
        # --type CNV means "both DEL and DUP", i.e. no type filtering
        if args.type != 'CNV':
            cnvbt = cnvbt.filter(lambda x: args.type in x.fields).saveas()
    # Extract relevant data from input GTF
    gtfbt, txbt, exonbt, genes, transcripts, cds_dict \
        = process_gtf(args.gtf, args.blacklist, args.xcov)
    if args.verbose:
        msg = 'Loaded {:,} {} from input gtf'
        print(msg.format(len(txbt), 'transcripts'))
        print(msg.format(len(exonbt), 'exons'))
        print(msg.format(len(genes), 'gene symbols'))
    # Intersect CNVs with exons
    case_cnvbt = cnvbt.filter(lambda x: args.control_hpo not in x[5].split(';')).saveas()
    case_counts, case_weights, case_cnv_weights \
        = overlap_cnvs_exons(case_cnvbt, exonbt, cds_dict, args.weight_mode,
                             args.min_cds_ovr, args.max_genes)
    max_genes_controls = args.max_genes
    if args.max_genes_in_cases_only:
        # reset controls to the (effectively unlimited) default of 20000
        max_genes_controls = 20000
    control_cnvbt = cnvbt.filter(lambda x: args.control_hpo in x[5].split(';')).saveas()
    control_counts, control_weights, control_cnv_weights \
        = overlap_cnvs_exons(control_cnvbt, exonbt, cds_dict, args.weight_mode,
                             args.min_cds_ovr, max_genes_controls)
    # Compute Bayesian weights, if optioned
    if args.weight_mode == 'bayesian' \
    and len(case_cnv_weights) > 0 \
    and len(control_cnv_weights) > 0:
        case_weights, case_cnv_weights, control_weights, control_cnv_weights \
            = get_bayes_weights(case_cnvbt, case_cnv_weights, control_cnvbt, control_cnv_weights)
    # Format output main counts table and write to outfile
    make_output_table(outbed, txbt, genes, cds_dict, control_counts,
                      control_weights, case_counts, case_weights)
    if args.outbed is not None \
    and args.outbed not in 'stdout -'.split() \
    and args.bgzip:
        subprocess.run(['bgzip', '-f', outbed_path])
    # If optioned, format annotated CNV BED file and write to args.cnvs_out
    if args.cnvs_out is not None:
        if path.splitext(args.cnvs_out)[-1] in '.gz .bz .bgz .bgzip .gzip'.split():
            cnvs_outpath = path.splitext(args.cnvs_out)[0]
            bgzip_cnvs_out = True
        else:
            cnvs_outpath = args.cnvs_out
            bgzip_cnvs_out = args.bgzip_cnvs_out
        cnvs_out = open(cnvs_outpath, 'w')
        write_annotated_cnvs(cnvbt, cnvs_out, case_cnv_weights,
                             control_cnv_weights, args.control_hpo,
                             args.annotate_cds_per_gene)
        if bgzip_cnvs_out:
            subprocess.run(['bgzip', '-f', cnvs_outpath])
# Script entry point
if __name__ == '__main__':
    main()
|
import syft
import torch
# interface
def test_sharing(workers):
    """Sharing under the falcon protocol wraps the tensor in a
    ReplicatedSharingTensor whose shares are stored in a dict."""
    bob, alice, james = [workers[w] for w in ("bob", "alice", "james")]
    plain_text = torch.tensor([3, 7, 11])
    secret = plain_text.share(bob, alice, james, protocol="falcon")
    assert isinstance(secret.child, syft.ReplicatedSharingTensor)
    assert type(secret.child.child) == dict
def test_reconstruction(workers):
    """Reconstructing shared data must reproduce the plaintext exactly."""
    bob, alice, james = [workers[w] for w in ("bob", "alice", "james")]
    plain_text = torch.tensor([3, -7, 11])
    shared = plain_text.share(bob, alice, james, protocol="falcon", field=2 ** 5)
    assert (plain_text == shared.reconstruct()).all()
def test_private_add(workers):
    """Adding two shared tensors reconstructs to the plaintext sum."""
    bob, alice, james = [workers[w] for w in ("bob", "alice", "james")]
    first = torch.tensor([7, 4]).share(bob, alice, james, protocol="falcon")
    second = torch.tensor([-2, 5]).share(bob, alice, james, protocol="falcon")
    assert torch.allclose(first.add(second).reconstruct(), torch.tensor([5, 9]))
def test_public_add(workers):
    """Adding a public tensor to a shared tensor."""
    bob, alice, james = [workers[w] for w in ("bob", "alice", "james")]
    shared = torch.tensor([7, 4]).share(bob, alice, james, protocol="falcon")
    public = torch.Tensor([-2, 5])
    assert torch.allclose(shared.add(public).reconstruct(), torch.tensor([5, 9]))
def test_reversed_add(workers):
    """A public scalar on the left of + still dispatches to shared add."""
    bob, alice, james = [workers[w] for w in ("bob", "alice", "james")]
    shared = torch.tensor([7, 4]).share(bob, alice, james, protocol="falcon")
    scalar = 1
    assert torch.allclose((scalar + shared).reconstruct(), torch.tensor([8, 5]))
def test_private_sub(workers):
    """Subtracting one shared scalar from another reconstructs the difference."""
    bob, alice, james = [workers[w] for w in ("bob", "alice", "james")]
    minuend = torch.tensor(7).share(bob, alice, james, protocol="falcon")
    subtrahend = torch.tensor(3).share(bob, alice, james, protocol="falcon")
    assert minuend.sub(subtrahend).reconstruct() == 4
def test_public_sub(workers):
    """Subtracting a public scalar from a shared scalar."""
    bob, alice, james = [workers[w] for w in ("bob", "alice", "james")]
    shared = torch.tensor(7).share(bob, alice, james, protocol="falcon")
    public = 3
    assert shared.sub(public).reconstruct() == 4
def test_reversed_sub(workers):
    """A public scalar on the left of - yields scalar - shared."""
    bob, alice, james = [workers[w] for w in ("bob", "alice", "james")]
    shared = torch.tensor([7, 4]).share(bob, alice, james, protocol="falcon")
    scalar = 1
    assert torch.allclose((scalar - shared).reconstruct(), torch.tensor([-6, -3]))
def test_add_with_operator(workers):
    """The + operator matches .add() for shared + public tensors."""
    bob, alice, james = [workers[w] for w in ("bob", "alice", "james")]
    shared = torch.tensor([7, 4]).share(bob, alice, james, protocol="falcon")
    public = torch.Tensor([2, 5])
    assert torch.allclose((shared + public).reconstruct(), torch.tensor([9, 9]))
def test_public_mul(workers):
    """Multiplying a shared tensor by a public integer."""
    bob, alice, james = [workers[w] for w in ("bob", "alice", "james")]
    shared = torch.tensor([7, -4]).share(bob, alice, james, protocol="falcon")
    assert torch.allclose((shared * 2).reconstruct(), torch.tensor([14, -8]))
def test_reversed_mul(workers):
    """A public integer on the left of * still dispatches to shared mul."""
    bob, alice, james = [workers[w] for w in ("bob", "alice", "james")]
    shared = torch.tensor([7, -4]).share(bob, alice, james, protocol="falcon")
    assert torch.allclose((2 * shared).reconstruct(), torch.tensor([14, -8]))
def test_private_mul(workers):
    """Element-wise multiplication of two shared tensors."""
    bob, alice, james = [workers[w] for w in ("bob", "alice", "james")]
    left = torch.tensor([3, -5]).share(bob, alice, james, protocol="falcon")
    right = torch.tensor([5, -2]).share(bob, alice, james, protocol="falcon")
    assert torch.allclose((left * right).reconstruct(), torch.tensor([15, 10]))
def test_public_matmul(workers):
    """Matrix-multiplying a shared tensor with a public tensor."""
    bob, alice, james = [workers[w] for w in ("bob", "alice", "james")]
    shared = torch.tensor([[1, 2], [3, 4]]).share(bob, alice, james, protocol="falcon")
    public = torch.tensor([[1, 2], [1, 2]])
    assert torch.allclose((shared.matmul(public)).reconstruct(), torch.tensor([[3, 6], [7, 14]]))
def test_private_matmul(workers):
    """Matrix-multiplying two shared tensors."""
    bob, alice, james = [workers[w] for w in ("bob", "alice", "james")]
    left = torch.tensor([[1, 2], [3, 4]]).share(bob, alice, james, protocol="falcon")
    right = torch.tensor([[1, 2], [1, 2]]).share(bob, alice, james, protocol="falcon")
    assert torch.allclose((left.matmul(right)).reconstruct(), torch.tensor([[3, 6], [7, 14]]))
def test_get_shape(workers):
    """Encrypting a tensor must not change its reported shape."""
    bob, alice, james = [workers[w] for w in ("bob", "alice", "james")]
    plain = torch.tensor([[1, 2], [3, 4]])
    expected = plain.shape
    encrypted = plain.share(bob, alice, james, protocol="falcon")
    assert expected == encrypted.shape
def test_get_players(workers):
    """players lists the workers in their sharing order."""
    bob, alice, james = [workers[w] for w in ("bob", "alice", "james")]
    shared = torch.tensor([[1, 2], [3, 4]]).share(bob, alice, james, protocol="falcon")
    assert shared.players == [bob, alice, james]
def test_view(workers):
    """view() reshapes a shared tensor like it does a plain tensor."""
    bob, alice, james = [workers[w] for w in ("bob", "alice", "james")]
    shared = torch.rand([2, 1]).long().share(bob, alice, james, protocol="falcon")
    reshaped = shared.view([1, 2])
    assert reshaped.shape == torch.Size([1, 2])
# corner cases
def test_consecutive_arithmetic(workers):
    """Chained *, +, - over shared tensors evaluates correctly."""
    bob, alice, james = [workers[w] for w in ("bob", "alice", "james")]
    x = torch.tensor([1, 2]).share(bob, alice, james, protocol="falcon")
    y = torch.tensor([1, 2]).share(bob, alice, james, protocol="falcon")
    combined = x * x + y * 2 - x * 4
    assert torch.allclose(combined.reconstruct(), torch.tensor([-1, 0]))
def test_negative_result(workers):
    """A subtraction whose result is negative reconstructs correctly."""
    bob, alice, james = [workers[w] for w in ("bob", "alice", "james")]
    larger = torch.tensor(7).share(bob, alice, james, protocol="falcon")
    smaller = torch.tensor(3).share(bob, alice, james, protocol="falcon")
    assert smaller.sub(larger).reconstruct() == -4
# utility functions
def test_shares_number():
    """generate_shares must return exactly as many shares as requested."""
    tensor = syft.ReplicatedSharingTensor()
    secret = torch.tensor(7)
    number_of_shares = 4
    produced = tensor.generate_shares(secret, number_of_shares)
    assert len(produced) == number_of_shares
def test_workers_arrangement(workers):
    """Sharing via (bob, alice, me) still yields `me` as players[0]."""
    me, bob, alice = [workers[w] for w in ("me", "bob", "alice")]
    shared = torch.tensor(7).share(bob, alice, me, protocol="falcon")
    assert shared.players[0] == me
def test_fixed_precision_and_sharing(workers):
    """fix_prec -> share -> reconstruct -> float_prec round-trips floats."""
    bob, alice, james = [workers[w] for w in ("bob", "alice", "james")]
    original = torch.tensor([3.25, 6.83, 8.21, 5.506])
    shared = original.fix_prec().share(bob, alice, james, protocol="falcon")
    assert torch.allclose(shared.reconstruct().float_prec(), original)
def test_add(workers):
    """Addition on falcon-shared tensors matches plaintext results for:
    plain ints, negatives, fixed precision, FPT + shared mixes, and
    integer/float constants (on both sides of the operator)."""
    bob, alice, james = (workers["bob"], workers["alice"], workers["james"])
    # 3 workers
    t = torch.tensor([1, 2, 3])
    x = torch.tensor([1, 2, 3]).share(bob, alice, james, protocol="falcon")
    y = (x + x).reconstruct()
    assert torch.allclose(y, (t + t))
    # negative numbers
    t = torch.tensor([1, -2, 3])
    x = torch.tensor([1, -2, 3]).share(bob, alice, james, protocol="falcon")
    y = (x + x).reconstruct()
    assert torch.allclose(y, (t + t))
    # with fixed precisions
    t = torch.tensor([1.0, -2, 3])
    x = torch.tensor([1.0, -2, 3]).fix_prec().share(bob, alice, james, protocol="falcon")
    y = (x + x).reconstruct().float_prec()
    assert torch.allclose(y, (t + t))
    # with FPT>torch.tensor (shared + non-shared fixed-precision operand)
    t = torch.tensor([1.0, -2.0, 3.0])
    x = t.fix_prec().share(bob, alice, james, protocol="falcon")
    y = t.fix_prec()
    z = (x + y).reconstruct().float_prec()
    assert torch.allclose(z, (t + t))
    # commuted order must work too
    z = (y + x).reconstruct().float_prec()
    assert torch.allclose(z, (t + t))
    # with constant integer
    t = torch.tensor([1.0, -2.0, 3.0])
    x = t.fix_prec().share(alice, bob, james, protocol="falcon")
    c = 4
    z = (x + c).reconstruct().float_prec()
    assert torch.allclose(z, (t + c))
    z = (c + x).reconstruct().float_prec()
    assert torch.allclose(z, (c + t))
    # with constant float
    t = torch.tensor([1.0, -2.0, 3.0])
    x = t.fix_prec().share(alice, bob, james, protocol="falcon")
    c = 4.2
    z = (x + c).reconstruct().float_prec()
    assert torch.allclose(z, (t + c))
    z = (c + x).reconstruct().float_prec()
    assert torch.allclose(z, (c + t))
def test_sub(workers):
    """Subtraction on falcon-shared tensors matches plaintext results for:
    plain ints, negatives, fixed precision, FPT + shared mixes, and
    integer/float constants (on both sides of the operator)."""
    bob, alice, james = (workers["bob"], workers["alice"], workers["james"])
    t = torch.tensor([1, 2, 3])
    x = torch.tensor([1, 2, 3]).share(bob, alice, james, protocol="falcon")
    y = (x - x).reconstruct()
    assert torch.allclose(y, (t - t))
    # negative numbers
    t = torch.tensor([1, -2, 3])
    x = torch.tensor([1, -2, 3]).share(bob, alice, james, protocol="falcon")
    y = (x - x).reconstruct()
    assert torch.allclose(y, (t - t))
    # with fixed precision
    t = torch.tensor([1.0, -2, 3])
    x = torch.tensor([1.0, -2, 3]).fix_prec().share(bob, alice, james, protocol="falcon")
    y = (x - x).reconstruct().float_prec()
    assert torch.allclose(y, (t - t))
    # with FPT>torch.tensor (shared minus non-shared, both orders)
    t = torch.tensor([1.0, -2.0, 3.0])
    u = torch.tensor([4.0, 3.0, 2.0])
    x = t.fix_prec().share(bob, alice, james, protocol="falcon")
    y = u.fix_prec()
    z = (x - y).reconstruct().float_prec()
    assert torch.allclose(z, (t - u))
    z = (y - x).reconstruct().float_prec()
    assert torch.allclose(z, (u - t))
    # with constant integer
    t = torch.tensor([1.0, -2.0, 3.0])
    x = t.fix_prec().share(alice, bob, james, protocol="falcon")
    c = 4
    z = (x - c).reconstruct().float_prec()
    assert torch.allclose(z, (t - c))
    z = (c - x).reconstruct().float_prec()
    assert torch.allclose(z, (c - t))
    # with constant float
    t = torch.tensor([1.0, -2.0, 3.0])
    x = t.fix_prec().share(alice, bob, james, protocol="falcon")
    c = 4.2
    z = (x - c).reconstruct().float_prec()
    assert torch.allclose(z, (t - c))
    z = (c - x).reconstruct().float_prec()
    assert torch.allclose(z, (c - t))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#===============================================================================
# Counting Sheep
# https://code.google.com/codejam/contest/6254486/dashboard#s=p0
#===============================================================================
from __future__ import unicode_literals
from codejam.common import CodeJamIO, Problem, ProblemInstance
#------------------------------------------------------------------------------
class CountSheeps(ProblemInstance):
    """Code Jam 2016 Qualification A: count multiples of N until every
    decimal digit 0-9 has appeared at least once."""

    def __init__(self):
        # Each problem instance reads its own N from the input stream.
        self.n = CodeJamIO.read_int()

    def solve(self):
        """Return the last number counted, or 'INSOMNIA' when N == 0."""
        if self.n == 0:
            return 'INSOMNIA'
        seen_digits = set()
        current = 0
        while len(seen_digits) != 10:
            current += self.n
            seen_digits.update(str(current))
        return '{}'.format(current)
#------------------------------------------------------------------------------
if __name__ == '__main__':
    # Drive the Code Jam harness: it instantiates CountSheeps once per test
    # case and prints each answer in the required "Case #i:" format.
    p = Problem(CountSheeps)
    p.solve()
|
"""
Django settings for communicationsexample project.
Generated by 'django-admin startproject' using Django 1.10.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is hard-coded and committed to source control.
# Rotate it and load it from the environment before any production deploy.
SECRET_KEY = '152-mhd6)p6r&x(e=%k&$q5!dl8uug$f04k@7&^*6j_xpkucl@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list is fine while DEBUG is True; must list real hosts in production.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'gurucommunication',
    'django_markdown2',
    'anymail',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'communicationsexample.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'communicationsexample.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
## Mail variables:
# SMS / TWILLIO -- all credentials come from the environment (None if unset)
TWILLIO_SID = os.environ.get('TWILLIO_SID', None)
TWILLIO_AUTH_TOKEN = os.environ.get('TWILLIO_AUTH_TOKEN', None)
TWILLIO_PHONE_NUMBER = os.environ.get('TWILLIO_PHONE_NUMBER', None)
SMS_BACKEND = 'services.backends.twillio.TwillioBackend'
# EMAIL / MailGun
ANYMAIL = {
    # (exact settings here depend on your ESP...)
    "MAILGUN_API_KEY": os.environ.get('MAILGUN_API_KEY', None),
    "MAILGUN_SENDER_DOMAIN": os.environ.get('MAILGUN_SENDER_DOMAIN', None),  # your Mailgun domain, if needed
}
EMAIL_BACKEND = "anymail.backends.mailgun.MailgunBackend"  # or sendgrid.SendGridBackend, or...
DEFAULT_FROM_EMAIL = os.environ.get('DEFAULT_FROM_EMAIL', None)
class GURU_MEDIUMS:
    # String constants naming the supported delivery mediums.
    SMS = 'SMS'
    EMAIL = 'EMAIL'
GURU_COMMUNICATIONS = {
    'SMS_BACKEND': 'gurucommunication.backends.twillio.TwillioBackend',
    'PREFERENCE_ORDER': ['SMS', 'EMAIL', 'NOTIFICATION'],
    'DEFAULT_DELIVERY_METHOD': 'EMAIL'
}
|
from collections import namedtuple
import os
from ergaleia.import_by_path import import_by_path
from ergaleia.load_from_path import load_lines_from_path
from ergaleia.to_args import to_args
from ergaleia.un_comment import un_comment
class _branch(dict):
    # One nested level of a Config tree. Values are either further _branch
    # nodes or _item leaves; attribute and bracket access are equivalent.
    def __lookup(self, name):
        # Raise KeyError (instead of returning None) for undefined keys.
        if name not in self:
            raise KeyError(name)
        return self.get(name)
    def __getattr__(self, name):
        # Only invoked when normal attribute lookup fails, i.e. for keys.
        attr = self.__lookup(name)
        if isinstance(attr, _item):
            # leaves are unwrapped so callers see the plain value
            return attr.value
        return attr
    def __getitem__(self, name):
        # bracket access delegates to attribute access
        return self.__getattr__(name)
    def __setattr__(self, name, value):
        # Assignment is only allowed on existing leaves -- never on branches
        # or undefined names -- keeping the structure fixed after _define.
        attr = self.__lookup(name)
        if not isinstance(attr, _item):
            raise AttributeError(name)
        attr.value = value
    def __setitem__(self, name, value):
        self.__setattr__(name, value)
class Config(_branch):
    """ Manage 'key=value' style configuration records.
    A Config is a constrained-key nested-dict whose values can be accessed
    using bracket or dot notation. Typically, the expected keys are defined
    during setup, and the values are read from a configuration file.
    Use a series of calls to the _define method to establish a valid
    structure. Key names can contain dots (.) which act to separate the
    name space and mitigate access to the Config data structure. For
    instance:
        c = Config()
        c._define('server.port', value=1234)
        # access the new key
        assert c.server.port == 1234
        # bracket notation works too
        assert c['server']['port'] == 1234
    Use the _load method to read and parse the values in a config file.
    Only defined keys will be accepted. To change the default value in the
    previous example, the config file might look like this:
        server.port=2345
    Notes:
    1. Methods are prepended with '_' in order not to pollute the namespace
       used by the defined values.
    2. Valid names are composed of letters, digits, underscores and periods.
       No part of a valid name can be composed only of digits.
    3. The _load method ignores leading and trailing whitespace in the
       names and values.
    4. The _load method ignores anything including and following a '#'
       character, thus allowing for comments. To prevent a '#' value from
       starting a comment, escape it by preceding it with a '\' character.
    5. If the env parameter is specified on the _define function, and an
       env variable of this name is set, then the value of the env variable
       overrides the 'value' parameter and any parameter read using the
       _load method. Values which are directly set override env.
    """
    def __init__(self, definition=None, filetype=None):
        # Stored via __dict__ directly: _branch.__setattr__ would reject the
        # assignment because it only allows writes to existing leaf items.
        self.__dict__['_ordered_keys'] = []
        if definition:
            self._define_from_path(definition, filetype)
    def __repr__(self):
        # One 'key=value' line per defined key, in definition order;
        # None renders as an empty value.
        return '\n'.join(
            '{}={}'.format(
                k, v if v is not None else ''
            ) for k, v in self.__ordered()
        )
    def __ordered(self):
        # (key, value) pairs in the order the keys were defined
        return [(k, self._get(k)) for k in self.__dict__['_ordered_keys']]
    def __lookup(self, name):
        # Walk a dotted name down the tree; return (parent branch, leaf name).
        level = self
        parts = str(name).split('.')
        parts, itemname = parts[:-1], parts[-1]
        for part in parts:
            level = level[part]
        return level, itemname
    def _define(self, name, value=None, validator=None, env=None):
        """Register a dotted key, optionally with default/validator/env."""
        keys = self.__dict__['_ordered_keys']
        if name not in keys:
            keys.append(name)
        parts = str(name).split('.')
        parts, itemname = parts[:-1], parts[-1]
        level = self
        for part in parts:
            # create intermediate branches on demand
            level = level.setdefault(part, _branch())
            if isinstance(level, _item):
                raise Exception(
                    'member {} of {} is a leaf node'.format(part, name)
                )
        item = level.get(itemname)
        if item:
            if isinstance(item, _item):
                # redefining an existing leaf replaces its metadata/value
                item.reset(value, validator, env)
            else:
                raise Exception(
                    'member {} of {} is a branch node'.format(itemname, name)
                )
        else:
            level.setdefault(itemname, _item(value, validator, env))
    def _define_from_path(self, path, filetype=None):
        """Define keys from a definition file: one _define call per line."""
        data = un_comment(load_lines_from_path(path, filetype))
        for num, line in enumerate(data, start=1):
            if not line:
                continue
            try:
                args, kwargs = to_args(line)
                if 'validator' in kwargs:
                    validator = kwargs.get('validator')
                    try:
                        # named validators first ('int', 'bool', 'file')...
                        kwargs['validator'] = _VALIDATE_MAP[validator]
                    except KeyError:
                        try:
                            # ...otherwise treat it as a dotted import path
                            kwargs['validator'] = import_by_path(validator)
                        except Exception:
                            raise Exception(
                                'Invalid validator: {}'.format(validator)
                            )
                self._define(*args, **kwargs)
            except Exception as e:
                raise Exception(
                    'Error on line {} of definition: {}'.format(num, e)
                )
    def _load(self, path='config', filetype=None, relaxed=False, ignore=False):
        """ load key value pairs from a file
            Parameters:
                path     - path to configuration data (see Note 1)
                filetype - type component of dot-delimited path
                relaxed  - if True, define keys on the fly (see Note 2)
                ignore   - if True, ignore undefined keys in path
            Return:
                self
            Notes:
                1. The path can be:
                   * an open file object with a readlines method
                   * a dot delimited path to a file (see normalize_path)
                   * an os-specific path to a file (relative to cwd)
                   * an iterable of key=value strings
                2. Normally keys read from the file must conform to keys
                   previously defined for the Config. If the relaxed flag
                   is True, any keys found in the file will be accepted.
                   If the ignore flag is True, keys found in the file
                   that are not previously defined are ignored.
        """
        for num, line in enumerate(
            un_comment(load_lines_from_path(path, filetype)),
            start=1,
        ):
            if not line:
                continue
            try:
                key, val = line.split('=', 1)
                key = key.strip()
                val = val.strip()
                if relaxed:
                    # define-on-the-fly: unknown keys become new leaves
                    self._define(key)
                try:
                    level, itemname = self.__lookup(key)
                except KeyError:
                    if ignore:
                        continue
                    raise
                item = level.get(itemname)
                if item is None:
                    raise KeyError(itemname)
                item.load(val)
            except Exception as e:
                # prefix the config line number onto the original message
                args = e.args or ('',)
                msg = 'line {} of config: {}'. format(num, args[0])
                e.args = (msg,) + args[1:]
                raise
        return self
    @property
    def _as_dict(self):
        # dotted-key -> value mapping over all defined keys
        return {k: v for k, v in self.__ordered()}
    def _get(self, name):
        """Return the value at dotted key 'name'; KeyError if undefined."""
        level, itemname = self.__lookup(name)
        try:
            return level[itemname]
        except TypeError:
            # parent resolved to a plain value rather than a subscriptable branch
            raise KeyError(itemname)
    def _set(self, name, value):
        """Set the value at dotted key 'name' (validated by the leaf item)."""
        level, itemname = self.__lookup(name)
        level[itemname] = value
class _item(object):
    # A leaf node: holds a value plus its optional validator callable and
    # the name of an environment variable that can override the default.
    def __init__(self, value, validator, env):
        self.reset(value, validator, env)
    def __setattr__(self, name, value):
        """ directly setting value does not respect env """
        # Note: every attribute assignment is funneled into 'value';
        # validator/env themselves are written through __dict__ in reset().
        validator = self.validator
        if value and validator:
            # falsy values (None, '', 0) deliberately bypass validation
            value = validator(value)
        self.__dict__['value'] = value
    def reset(self, value, validator, env):
        # Write metadata via __dict__ to avoid triggering __setattr__.
        self.__dict__['validator'] = validator
        self.__dict__['env'] = env
        if env:
            # the env variable, when set, overrides the supplied value
            value = os.getenv(env, value)
        self.value = value
    def load(self, value):
        """ enforce env > value when loading from file """
        # Re-running reset makes a set env variable win over the file value.
        self.reset(
            value,
            validator=self.__dict__.get('validator'),
            env=self.__dict__.get('env'),
        )
def validate_bool(value):
    """Coerce 0/1, booleans, and 'true'/'false' strings (any case) to bool.

    Anything else raises ValueError.
    """
    truthy = (False, True)
    if value in (0, 1):
        return truthy[value]
    if value in truthy:
        return value
    lookup = {'TRUE': True, 'FALSE': False}
    try:
        return lookup[value.upper()]
    except (AttributeError, KeyError):
        # non-string input (no .upper) or an unrecognized word
        raise ValueError
def validate_file(value):
    """Pass through empty values and paths that name an existing file.

    Raises Exception when a non-empty path does not point at a file.
    """
    if len(value) != 0 and not os.path.isfile(value):
        raise Exception("file '%s' not found" % value)
    return value
# Names accepted by the 'validator' keyword in definition files, mapped to
# the callables that coerce/validate raw string values.
_VALIDATE_MAP = {
    'int': int,
    'bool': validate_bool,
    'file': validate_file,
}
class Mini(object):
    """ limited one-level config
    Define field names (no dots) with string arguments to the constructor.
    Set values in the normal way, or using kwargs to the set method.
    Load values from a file with the load method.
    Return a namedtuple of key-values with the as_tuple method.
    """
    def __init__(self, *args):
        # The field names are forwarded to Config as its definition iterable;
        # stored via __dict__ so __setattr__ (below) is not triggered.
        self.__dict__['conf'] = Config(args)
    def __getattr__(self, name):
        # Fallback lookup: resolve unknown attributes as config fields.
        return self.conf[name]
    def __setattr__(self, name, value):
        # All attribute writes become config field writes (validated there).
        self.__dict__['conf'][name] = value
    def set(self, **kwargs):
        """Set several fields at once from keyword arguments."""
        for k, v in kwargs.items():
            self.__dict__['conf'][k] = v
    def load(self, path):
        """Read key=value pairs for the defined fields from *path*."""
        self.conf._load(path)
    def as_tuple(self, name='MiniConfig'):
        """Return the current field values as a namedtuple called *name*."""
        return namedtuple(name, self.conf._ordered_keys)(
            **{n: self.conf._get(n) for n in self.conf._ordered_keys}
        )
if __name__ == '__main__':
    # CLI utility: parse a config file against a definition and print the
    # resulting key=value pairs (via Config.__repr__).
    import argparse
    parser = argparse.ArgumentParser(
        description='parse a config file',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument(
        '-c', '--config',
        type=argparse.FileType('r'),
        help='configuration file'
    )
    parser.add_argument(
        '-d', '--defn',
        type=argparse.FileType('r'),
        help='config definition file'
    )
    parser.add_argument(
        '-r', '--relaxed', default=False, action='store_true',
        help='accept all keys found in config file (define on the fly)'
    )
    args = parser.parse_args()
    c = Config(args.defn)
    if args.config:
        c._load(args.config, relaxed=args.relaxed)
    print(c)
|
#!/usr/bin/python2
import os
import subprocess
import sys
import matplotlib as mpl
import numpy as np
# Fall back to the non-interactive Agg backend when no X display is set.
try:
    subprocess.check_output("env | grep DISPLAY", shell=True).decode('utf-8')
except:
    # check_output raises CalledProcessError when grep matches nothing
    mpl.use("Agg")
import matplotlib.pyplot as plt
# framealpha is only available for matplotlib version >= 1.2.0
oldversion = False
if(int(mpl.__version__.split(".")[0]) <= 1 and
        int(mpl.__version__.split(".")[1]) < 2):
    oldversion = True
# Expect: LOGFILE_FOLDER and an optional output image filename.
if(not len(sys.argv) in [2, 3]):
    print("ERROR: Invalid number of parameters.\nSyntax: " + sys.argv[0] +\
          " LOGFILE_FOLDER [IMAGE_FILENAME] ")
    sys.exit(1)
folder = sys.argv[1]
# Sort the MaxiNet log files by kind based on their filename prefix.
intflogs = []
memlogs = []
cpulogs = []
for filename in os.listdir(folder):
    if filename[:13] == "maxinet_intf_":
        intflogs.append(filename)
    elif filename[:12] == "maxinet_mem_":
        memlogs.append(filename)
    elif filename[:12] == "maxinet_cpu_":
        cpulogs.append(filename)
    else:
        print("unknown file: " + filename)
# assumes exactly one memory log per worker -- TODO confirm
numworkers = len(memlogs)
graph = 0
# CPU
# One subplot for CPU (shared by all workers), one for interfaces, and one
# memory subplot per worker; all share the x (time) axis.
fig, ax = plt.subplots(2 + numworkers, sharex=True)
fig.set_size_inches(8.3, 1.5 * numworkers + 5)
for filename in cpulogs:
    # filename layout: maxinet_cpu_<workerID>_<(hostname).log>
    sfilename = filename.split("_")
    workerID = sfilename[2]
    workerHN = sfilename[3][1:-5]
    axis_x = []
    axis_y = []
    for line in open(os.path.join(folder, filename)):
        line = line.split()
        # only the aggregate "all" CPU rows are plotted
        if("all" in line):
            timestamp = int(line[0])
            idle = float(line[-1])
            used = 100 - idle
            axis_x.append(timestamp)
            axis_y.append(used)
    ax[graph].plot(axis_x, axis_y, label=workerHN)
    print(ax[graph])
# Relabel the x axis so time starts at 0, with a tick every 10 seconds.
xmin, xmax = map(lambda x: int(x), ax[graph].get_xaxis().get_data_interval())
xticks = range(xmin, xmax)[0::10]
# NOTE(review): under Python 3 map() returns an iterator; this script
# targets python2 (see shebang) -- confirm before running under py3.
xlabels = map(lambda x: x - xmin, xticks)
ax[graph].get_xaxis().set_ticks(xticks)
ax[graph].get_xaxis().set_ticklabels(xlabels)
ax[graph].set_ylabel("CPU utilization [%]")
if oldversion:
    ax[graph].legend(loc="best")
else:
    ax[graph].legend(framealpha=0.5, loc="best")
#Intf
graph += 1
for filename in intflogs:
    # filename layout: maxinet_intf_<?>_<workerID>_<(hostname).log>
    sfilename = filename.split("_")
    workerID = sfilename[3]
    workerHN = sfilename[4][1:-5]
    axis_x = []
    axis_y_rx = []
    axis_y_tx = []
    # CSV rows: timestamp, rx_bytes, tx_bytes
    for line in open(os.path.join(folder, filename)):
        line = line.split(",")
        timestamp = int(line[0])
        rx_bytes = int(line[1])
        tx_bytes = int(line[2])
        axis_x.append(timestamp)
        # bytes -> megabits
        axis_y_rx.append((rx_bytes / 1000000.0) * 8.0)
        axis_y_tx.append((tx_bytes / 1000000.0) * 8.0)
    ax[graph].plot(axis_x, axis_y_rx, label=workerHN + " RX")
    ax[graph].plot(axis_x, axis_y_tx, label=workerHN + " TX")
ax[graph].set_ylabel("Data rate [Mbit/s]")
if oldversion:
    ax[graph].legend(loc="best")
else:
    ax[graph].legend(framealpha=0.5, loc="best")
#Mem
# One stacked free/buffers/cached memory plot per worker.
for filename in memlogs:
    graph += 1
    # filename layout: maxinet_mem_<workerID>_<(hostname).log>
    sfilename = filename.split("_")
    workerID = sfilename[2]
    workerHN = sfilename[3][1:-5]
    axis_x = []
    axis_y_free = []
    axis_y_buffers = []
    axis_y_cached = []
    # CSV rows: timestamp, MemFree, Buffers, Cached (values in KB)
    for line in open(os.path.join(folder, filename)):
        line = line.split(",")
        timestamp = line[0]
        free = int(line[1])
        buffers = int(line[2])
        cached = int(line[3])
        axis_x.append(int(timestamp))
        # KB -> MB
        axis_y_free.append(free / 1024.0)
        axis_y_buffers.append(buffers / 1024.0)
        axis_y_cached.append(cached / 1024.0)
    # Stack the three series cumulatively for fill_between bands.
    axis_y = np.row_stack((axis_y_free, axis_y_buffers, axis_y_cached))
    axis_y_stack = np.cumsum(axis_y, axis=0)
    print(axis_y_stack)
    axis_x = np.array(axis_x)
    print(axis_x)
    ax[graph].fill_between(axis_x, 0, axis_y_stack[0, :], facecolor="#CC6666")
    ax[graph].fill_between(axis_x, axis_y_stack[0, :], axis_y_stack[1, :],
                           facecolor="#1DACD6")
    ax[graph].fill_between(axis_x, axis_y_stack[1, :], axis_y_stack[2, :],
                           facecolor="#6E5160")
    ax[graph].set_ylabel("Memory [MB]")
    ax[graph].set_ylim(bottom=0)
    # Only the bottom-most subplot carries the x-axis label.
    if(graph == numworkers + 1):
        ax[graph].set_xlabel("Time [s]")
    # fill_between produces no legend handles, so build proxy rectangles.
    if oldversion:
        ax[graph].legend([mpl.patches.Rectangle((0, 0), 1, 1, fc="#CC6666"),
                          mpl.patches.Rectangle((0, 0), 1, 1, fc="#1DACD6"),
                          mpl.patches.Rectangle((0, 0), 1, 1, fc="#6E5160")
                          ],
                         ["MemFree", "Buffers", "Cached"], loc="best")
    else:
        ax[graph].legend([mpl.patches.Rectangle((0, 0), 1, 1, fc="#CC6666"),
                          mpl.patches.Rectangle((0, 0), 1, 1, fc="#1DACD6"),
                          mpl.patches.Rectangle((0, 0), 1, 1, fc="#6E5160")
                          ],
                         ["MemFree", "Buffers", "Cached"], framealpha=0.5,
                         loc="best")
# Save to file when an output name was given; always attempt to display.
if(len(sys.argv) == 3):
    plt.savefig(sys.argv[2])
try:
    plt.show()
except:
    pass  # Fail silently if no X display is available
|
def write_new(first, last, id_num):
    """
    Create a new entry in the storage text file.
    :param first: the first name
    :param last: the last name
    :param id_num: id number
    :return: None
    """
    record = "{}, {}, {}\n".format(first, last, id_num)
    with open("database.txt", "a") as db:
        db.write(record)
def get_id(first_name, last_name):
    """
    :param first_name: The first_name to search for.
    :param last_name: The last_name to search for.
    :return: The id number for the given first/last name, otherwise None.
    """
    with open("database.txt", "r") as db:
        for raw in db:
            record = raw.rstrip()
            if not record:
                continue
            first, last, id_number = record.split(", ")
            if (first, last) == (first_name, last_name):
                return id_number
    return None
def get_name(id_num):
    """
    Return the first and last name associated with an ID.
    :param id_num: the id number to search for (compared as a string)
    :return: a tuple of the (first_name, last_name), or (None, None) if not found
    """
    # Convert once, outside the loop, instead of on every line.
    id_num_string = str(id_num)
    with open("database.txt", "r") as f:
        for line in f:
            line = line.rstrip()
            if not line:
                continue
            first, last, _id = line.split(", ")
            if id_num_string == _id:
                return first, last
    return None, None
|
"""This module contains the general information for FirmwareConstraints ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class FirmwareConstraintsConsts:
    # Auto-generated constants holder; this managed object defines no
    # enumerated property values.
    pass
class FirmwareConstraints(ManagedObject):
    """This is FirmwareConstraints class."""
    # NOTE: generated SDK code -- metadata below mirrors the UCS object model
    # and should not be edited by hand.
    consts = FirmwareConstraintsConsts()
    naming_props = set([])
    # MoMeta(name, xml tag, rn, min version, access, mask, naming props,
    #        field names, parent MO types, child MO types, verbs)
    mo_meta = MoMeta("FirmwareConstraints", "firmwareConstraints", "fw-constraints", VersionMeta.Version223a, "InputOutput", 0x1f, [], [""], ['diagSrvCapProvider', 'equipmentBladeCapProvider', 'equipmentCatalogCapProvider', 'equipmentChassisCapProvider', 'equipmentDbgPluginCapProvider', 'equipmentIOExpanderCapProvider', 'equipmentMgmtCapProvider', 'equipmentMgmtExtCapProvider', 'equipmentRackEnclosureCapProvider', 'equipmentRackUnitCapProvider', 'equipmentServerUnitCapProvider', 'equipmentSiocCapProvider', 'equipmentStorageEncCapProvider', 'equipmentSwitchCapProvider'], ['firmwareConnectionManagementChassisConstraint', 'firmwareDriveSecurityConstraint', 'firmwareDualVicChassisConstraint', 'firmwareDualVicServerConstraint', 'firmwarePCHStorageConfigConstraint', 'firmwareProcessorTypeConstraint', 'firmwareRackPowerCapConstraint', 'firmwareSecondControllerChassisConstraint', 'firmwareSecondControllerServerConstraint', 'firmwareServerChassisConstraint', 'firmwareServerTypeConstraint', 'firmwareSinglePathChassisConstraint', 'firmwareTPMConstraint', 'firmwareVicSlotConstraint', 'firmwareVnicCdnConstraint', 'firmwareWaterbaySiocServerConstraint'], [None])
    # Per-property metadata: XML name, type, min version, access, mask,
    # min/max length, validation regex, enums, ranges.
    prop_meta = {
        "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version223a, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
        "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version223a, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
        "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version223a, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
        "sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
        "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version223a, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
    }
    # XML attribute name -> Python attribute name
    prop_map = {
        "childAction": "child_action",
        "dn": "dn",
        "rn": "rn",
        "sacl": "sacl",
        "status": "status",
    }
    def __init__(self, parent_mo_or_dn, **kwargs):
        self._dirty_mask = 0
        self.child_action = None
        self.sacl = None
        self.status = None
        ManagedObject.__init__(self, "FirmwareConstraints", parent_mo_or_dn, **kwargs)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render, redirect
from django.urls import reverse
from django.http import HttpResponse, JsonResponse
from django.utils.translation import ugettext_lazy as _
import main.settings_environ as settings_environ
from main.settings_deploy import DOMAIN_NAME
# Idiom fix (PEP 8 / E711): compare against None with `is not`, never `!=`.
if settings_environ.MAPBOX_KEY is not None:
    # production settings; app is running on server
    from main.settings_environ import MAPBOX_KEY
else:
    # development settings; app is running locally
    from main.settings_sensitive import MAPBOX_KEY
from ..geography.views import Location
from ..jobs.views import Listings
from ..jobs.models import Job
from main.settings import LANGUAGE_CODE as DEFAULT_LANGUAGE
def redirect_language(request):
    """Send bare-root requests to the default-language home page."""
    target = '/' + DEFAULT_LANGUAGE + '/'
    return redirect(target)
def index(request):
    """Render the home page centered on the requested neighborhood."""
    raw_neighborhood = request.GET.get('neighborhood', 'Wicker Park')
    neighborhood = raw_neighborhood.replace('+', ' ')
    # set default map
    location = Location(neighborhood).get_coordinates()
    # jobs = Listings().retrieve_jobs().build_jobs()
    jobs = Job.objects.all()
    context = {
        'title': DOMAIN_NAME,
        'location': location,
        'api_key': MAPBOX_KEY,
        'jobs': jobs,
    }
    return render(request, "home/index.html", context)
def search(request):
    # retrieve query from the request ('+' stands in for spaces)
    query = request.GET.get('query', 'Logan Square').replace('+', ' ')
    # process query and get relevant map data
    location = Location(query).get_coordinates()
    # NOTE(review): debug print left in a request handler -- consider logging
    print("query =", query, "coordinates =", location.coordinates)
    # return map data as JSON for the frontend map widget
    return JsonResponse({"latitude": location.coordinates[0],
                         "longitude":location.coordinates[1]
                         })
def test(request):
    """Render the standalone map test page with the Mapbox token."""
    return render(request, "home/test_map.html", {'access_token': MAPBOX_KEY})
def eggs(request):
    """Placeholder view.

    Bug fix: a Django view must return an HttpResponse; returning a bare
    str raises "The view ... didn't return an HttpResponse object" at
    request time. HttpResponse is already imported at the top of this file.
    """
    return HttpResponse("Poop")
|
import os
import xml.etree.ElementTree as ET
from rogue_sky import darksky
# Fail fast at import time (KeyError) if the Dark Sky key is not in the env.
DARKSKY_API_KEY = os.environ["DARKSKY_SECRET_KEY"]
def test_ping(backend_api_client):
    """The health endpoint responds 200 with an OK status body."""
    resp = backend_api_client.get("/api/health")
    assert resp.status_code == 200
    assert resp.get_json() == {"status": "ok"}
def test_get_blog_posts(backend_api_client):
    """The blog index returns a non-empty list whose entries carry only
    the expected metadata keys."""
    resp = backend_api_client.get("/api/blog")
    assert resp.status_code == 200
    posts = resp.get_json()
    assert isinstance(posts, list)
    assert len(posts) > 0
    assert not set(posts[0]) - {"title", "date", "summary", "url"}
def test_get_blog_post(backend_api_client):
    """A single post exposes content alongside its metadata."""
    resp = backend_api_client.get("/api/blog/something-worth-writing")
    assert resp.status_code == 200
    post = resp.get_json()
    assert isinstance(post, dict)
    assert not set(post) - {"title", "date", "summary", "content", "url"}
def test_get_rss(backend_api_client):
    """The RSS feed endpoint serves well-formed XML."""
    resp = backend_api_client.get("/api/rss")
    assert resp.status_code == 200
    assert ET.fromstring(resp.data.decode())
def test_get_astronomical_events(backend_api_client):
    """The events endpoint returns a non-empty list of event dicts with
    only the expected keys."""
    resp = backend_api_client.get("/api/astronomical_events")
    assert resp.status_code == 200
    events = resp.get_json()
    assert isinstance(events, list)
    assert len(events) > 0
    first = events[0]
    assert isinstance(first, dict)
    assert not set(first) - {"date", "event", "info", "type"}
|
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import os
import attr
import config
import pandas as pd
@attr.s
class cruncher(object):
    # Helper around pandas for reading test-report spreadsheets and
    # computing summary quantiles.
    def read(self, **kwargs):
        """Read one column per worksheet into a dict of Series.

        Expects kwargs: filename, sheets (iterable of sheet names), and
        columns (iterable of column indices, zipped with sheets).
        """
        df = {}
        for ws, col in zip(kwargs["sheets"], kwargs["columns"]):
            # NOTE(review): squeeze= was deprecated in pandas 1.4 and removed
            # in 2.0 -- confirm the pinned pandas version before upgrading.
            df[ws] = pd.read_excel(
                kwargs["filename"], sheet_name=ws, usecols=[col], squeeze=True,
            )  # Squeeze will render a series instead of a data frame.
            df[ws].fillna(0, inplace=True)
        return df
    def quant(self, series):
        """Return the [p25, p50, p95] quantiles of *series* as a list."""
        if series.size < 5:  # i.e. the test did not run for a long time
            return [0, 0, 0]
        else:
            return list(series.quantile(q=[0.25, 0.50, 0.95], interpolation="nearest"))
    def xls_to_csv(self, xlsfilepath):
        """helper func to conv file from xls to csv

        Returns (True, csv_filename) on success, (False, "") on failure.
        """
        try:
            fname, _ = os.path.splitext(os.path.basename(xlsfilepath))
            xls_file = pd.read_excel(xlsfilepath)
            xls_file.to_csv(
                f"{config.TAS.get('test_report_path')}{fname}.csv",
                index=None,
                header=True,
            )
        except Exception as e:
            logging.error(f"error while converting file from xls to csv {e}")
            return False, ""
        return True, fname + ".csv"
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .coons import *
from .tweening import *
# Re-export every public name pulled in by the wildcard imports above.
__all__ = [name for name in dir() if not name.startswith('_')]
|
class Solution:
    """
    @param A: A string
    @param B: A string
    @return: if string A contains all of the characters in B return true else return false
    """
    def compareStrings(self, A, B):
        # Multiset containment: every character of B (with multiplicity)
        # must appear in A. Counter subtraction keeps only positive counts,
        # so an empty difference means nothing from B is missing.
        # O(len(A) + len(B)) instead of the O(len(A) * len(B)) cost of the
        # list-membership-and-remove approach.
        from collections import Counter
        return not (Counter(B) - Counter(A))
|
import logging
from .particle import Particle
logger = logging.getLogger(__name__)
class ParticleSwarmOptimization(Particle):
    # Particle state extended with the velocity/memory terms used by PSO
    # variants (AIWPSO, TensorPSO).
    def __init__(self):
        super().__init__()
        # augmented parameters
        self.v = None   # velocity vector (initialized later by the optimizer)
        self.xl = None  # local (personal) best position
        # AIWPSO
        self.pfit = 0.0  # fitness value of the previous iteration
        # TensorPSO
        self.t_v = []  # tensor velocity (matrix)
        self.t_xl = []  # tensor local best (matrix)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
r"""
# .---. .-----------
# / \ __ / ------
# / / \( )/ ----- (`-') _ _(`-') <-. (`-')_
# ////// '\/ ` --- ( OO).-/( (OO ).-> .-> \( OO) ) .->
# //// / // : : --- (,------. \ .'_ (`-')----. ,--./ ,--/ ,--.' ,-.
# // / / / `\/ '-- | .---' '`'-..__)( OO).-. ' | \ | | (`-')'.' /
# // //..\\ (| '--. | | ' |( _) | | | | . '| |)(OO \ /
# ============UU====UU==== | .--' | | / : \| |)| | | |\ | | / /)
# '//||\\` | `---. | '-' / ' '-' ' | | \ | `-/ /`
# ''`` `------' `------' `-----' `--' `--' `--'
# ######################################################################################
#
# Author: edony - edonyzpc@gmail.com
#
# twitter : @edonyzpc
#
# Last modified: 2015-06-02 20:50
#
# Filename: kernelclean.py
#
# Description: All Rights Are Reserved
#
"""
#import scipy as sp
#import math as m
#import matplotlib as mpl
#import matplotlib.pyplot as plt
#from mpl_toolkits.mplot3d import Axes3D as Ax3
#from scipy import stats as st
#from matplotlib import cm
#import numpy as np
from __future__ import absolute_import
import os
import re
import sys
import subprocess as sp
import platform as pf
from getpass import getpass
import hashlib
if sys.version.startswith("3.4."):
from functools import reduce
from packages.fileparser.extractor import Extractor
class PyColor(object):
    """ANSI-escape helper for colored printing in the Python interpreter.

    Exposes red/green escape codes for warnings and tips, a reset code,
    and a user-settable custom color via the `new` property. (Originally
    inserted by an "F3"/Addpy vim mapping.)
    """

    def __init__(self):
        # Reference table for building custom escape sequences.
        self.self_doc = r"""
        STYLE: \033['display model';'foreground';'background'm
        DETAILS:
        FOREGROUND BACKGOUND COLOR
        ---------------------------------------
        30 40 black
        31 41 red
        32 42 green
        33 43 yellow
        34 44 blue
        35 45 purple
        36 46 cyan
        37 47 white
        DISPLAY MODEL DETAILS
        -------------------------
        0 default
        1 highlight
        4 underline
        5 flicker
        7 reverse
        8 non-visiable
        e.g:
        \033[1;31;40m <!--1-highlight;31-foreground red;40-background black-->
        \033[0m <!--set all into default-->
        """
        # Default palette: red warnings, green tips, plus the reset code.
        self.warningcolor = '\033[0;31m'
        self.tipcolor = '\033[0;32m'
        self.endcolor = '\033[0m'
        self._newcolor = ''

    @property
    def new(self):
        """The user-customized print color (an escape sequence string)."""
        return self._newcolor

    @new.setter
    def new(self, color_str):
        """Install a new custom color escape sequence."""
        self._newcolor = color_str

    def disable(self):
        """Turn colored output off by blanking the warning and reset codes."""
        self.warningcolor = ''
        self.endcolor = ''
class KernelClean(object):
"""
Cleanup the Fedora Linux kernel after `dnf(yum) update`.
"""
    def __init__(self, check=0):
        """Initialize empty kernel bookkeeping state.

        :param check: 1 to require a manual confirmation before removing
                      kernels, 0 to skip it.
        """
        self._filebuf = 'kernelclean'      # scratch filename (legacy rpm-query path)
        self.kernel = ''                   # running kernel version (uname -r)
        self.exist_kernels = []            # all installed kernel-* packages
        self.old_kernel = []               # version tuples not matching self.kernel
        self.kernel_clean = []             # package names selected for removal
        self.color = PyColor()             # colored terminal output helper
        # self.check for manual check to remove system kernel(1 for check, 0 for not check)
        self.check = check
        self.record = []                   # parsed (version, flavour, rest) tuples
def in_using_kernel(self):
"""
RPM query about the kernel existing in the system => self._filebuf
Get the version of running kernel => self.kernel
***rewrite the using kernel finding***
command_rpm_kernel = 'rpm -qa | grep "^kernel-" > '
command_rpm_kernel += self._filebuf
os.system(command_rpm_kernel)
command_kernel = 'uname -r'
pipeout = sp.Popen(command_kernel.split(), stdout=sp.PIPE)
self.kernel = pipeout.stdout.readline().rstrip().decode('utf-8')
"""
pipeout = sp.Popen('uname -r'.split(), stdout=sp.PIPE)
self.kernel = pipeout.stdout.readline().rstrip().decode('utf-8')
out = sp.Popen('rpm -qa'.split(), stdout=sp.PIPE)
for ls in out.stdout.readlines():
pattern = '^kernel-'
ls = ls.rstrip().decode('utf-8')
if re.match(pattern, ls):
self.exist_kernels.append(ls)
def find_old_kernel(self):
"""
Find the old kernel in system => self.old_kernel
"""
pattern = "^kernel-[a-zA-Z-]*([0-9.-]*)([a-zA-Z]+)(.*)"
self.record = set([re.match(pattern, item).groups() for item in self.exist_kernels])
self.old_kernel = [item for item in self.record if item[0] not in self.kernel]
def to_cleaned_kernel(self):
"""
Ensure the to be cleaned kernel in queried list => self.kernelclean
"""
if self.old_kernel:
kernel_clean_id = []
[kernel_clean_id.append(''.join(item)) for item in list(self.old_kernel)]
for id in kernel_clean_id:
[self.kernel_clean.append(item) for item in self.exist_kernels if id in item]
def cleanup(self):
"""
Cleanup the old kernel
"""
if self.old_kernel:
reboot = input(self.color.endcolor + 'Do You Need to Reboot System?(y or n)\n')
if reboot == 'y':
os.system('reboot')
elif reboot == 'n':
print(self.color.warningcolor + 'Cleanup Kernel ...' + self.color.endcolor)
pwd_md5 = 'b04c541ed735353c44c52984a1be27f8'
pwd = getpass("Enter Your Password: ")
if hashlib.md5(pwd.encode('utf-8')).hexdigest() != pwd_md5:
print(self.color.warningcolor + "Wrong Password" + self.color.endcolor)
print('\033[0;36m' + "Try Angain" + '\033[0m')
pwd = getpass("Enter Your Password: ")
if hashlib.md5(pwd.encode('utf-8')).hexdigest() != pwd_md5:
return
echo = ['echo']
echo.append(pwd)
if pf.linux_distribution()[1] > '21':
command = 'sudo -S dnf -y remove '
for item in self.kernel_clean:
command += item
command += ' '
else:
command = 'sudo -S yum -y remove '
for item in self.kernel_clean:
command += item
command += ' '
pipein = sp.Popen(echo, stdout=sp.PIPE)
pipeout = sp.Popen(command.split(), stdin=pipein.stdout, stdout=sp.PIPE)
for line in pipeout.stdout.readlines():
if line == '':
break
if isinstance(line, bytes):
line = line.decode()
print(line)
print(self.color.tipcolor + 'End Cleanup!' + self.color.endcolor)
print(self.color.warningcolor +\
'Your Kernel is Update!' +\
self.color.endcolor)
def main(self):
"""
Union the cleanup stream
"""
self.in_using_kernel()
self.find_old_kernel()
self.to_cleaned_kernel()
if self.check == 1:
if self.old_kernel:
print(self.color.tipcolor + 'Your Old Kernel: ')
for item in self.old_kernel:
print(''.join(item))
print(self.color.warningcolor + 'In Using Kernel: ')
print(self.kernel + self.color.endcolor)
check_cmd = input('Remove the old kernel?(y or n)\n')
if check_cmd == 'y':
self.cleanup()
else:
print('\033[36m' + 'Do Not Remove Old kernel' + '\033[0m')
else:
print(self.color.tipcolor +\
'Your System Has No Old Kernel To Cleanup!' +\
self.color.endcolor)
if __name__ == '__main__':
    # Manual smoke run with check=1 (interactive confirmation mode).
    # NOTE(review): main()/cleanup() is deliberately not invoked here, so this
    # entry point only inspects installed kernels and never removes anything.
    TEST = KernelClean(1)
    TEST.in_using_kernel()
    TEST.find_old_kernel()
    TEST.to_cleaned_kernel()
|
import configparser as cp
import os
# Module-level side effect: parse config.ini that sits next to this file.
# ConfigParser.read() silently ignores a missing file -- `config` would then
# simply be empty; TODO confirm that is the intended behavior.
config = cp.ConfigParser()
path = os.path.abspath(os.path.dirname(__file__)) + '/config.ini'
config.read(path)
from starkware.crypto.signature.signature import pedersen_hash, private_to_stark_key, sign
from starkware.starknet.public.abi import get_selector_from_name
class Signer():
    """Holds a STARK private key and signs message hashes with it."""

    def __init__(self, private_key):
        # Derive and cache the public counterpart up front.
        self.private_key = private_key
        self.public_key = private_to_stark_key(private_key)

    def sign(self, message_hash):
        """Return the STARK signature of *message_hash* under this key."""
        signature = sign(msg_hash=message_hash, priv_key=self.private_key)
        return signature
|
"""
Main entrypoint for finetuning
It could be a function that wraps the huggingface Trainer and picks appropriate finetuning
parameters depending on the language
E.g.
from wav2vec_toolkit.finetune import finetune
finetune(base_model="facebook/wav2vec-xlsr", dataset="common_voice", language="fr", max_epochs=100)
"""
import os
from dataclasses import dataclass, field
from typing import Optional
from transformers import HfArgumentParser
import wandb
@dataclass
class FakeArguments:
    """Fake hyper-parameters used to exercise the finetuning entrypoint."""

    epochs: Optional[int] = field(
        default=1, metadata={"help": "Fake number of epochs"}
    )
    learning_rate: Optional[float] = field(
        default=0.001, metadata={"help": "Fake learning rate"}
    )
    # Bug fix: this field() call was missing its closing parenthesis (syntax
    # error); the help text was also a copy-paste of the learning-rate one.
    dropout: Optional[float] = field(
        default=0.5, metadata={"help": "Fake dropout"}
    )
    output_dir: Optional[str] = field(
        default='', metadata={"help": "Fake output directory"}
    )
def main():
    """Parse the fake finetuning args, log a fake WER to wandb, and upload
    the files in the parsed output directory as a wandb model artifact."""
    # Bug fix: Path was used below but never imported anywhere in this file.
    from pathlib import Path
    import random
    import time

    parser = HfArgumentParser((FakeArguments,))
    # start a new run and log all args
    args = parser.parse_args()
    wandb.init(config=args, project='xlsr')
    # main loop
    fake_args, = parser.parse_args_into_dataclasses()
    time.sleep(5)  # simulate training work
    test_wer = random.random()
    # logging will be automatically done by Trainer
    wandb.log({'test/wer': test_wer})
    # log model files as artifact
    artifact = wandb.Artifact(name=f"model-{wandb.run.id}", type="model", metadata={'wer': test_wer})
    # Bug fix: `training_args` was undefined -- the parsed FakeArguments
    # dataclass (fake_args) is what carries output_dir.
    for f in Path(fake_args.output_dir).iterdir():
        if f.is_file():
            artifact.add_file(str(f))
    wandb.run.log_artifact(artifact)
|
# Package initializer: re-export the dataframe helpers and expose metadata.
from .df_utils import *
__version__ = "0.0.2"
author_name = "Anastasia Lysenko"
# Demo attribute showing that package-level names are importable.
my_str = "This is my_str from __init__.py"
|
import logging
from greent.triplestore import TripleStore
from greent.util import LoggingUtil
import unittest
from pprint import pprint
logger = LoggingUtil.init_logging(__name__)
class MeSH(object):
    """Thin SPARQL client for the NLM MeSH vocabulary endpoint."""
    def __init__(self, uri="http://id.nlm.nih.gov/mesh/sparql"):
        # uri: SPARQL endpoint (defaults to NLM's public MeSH service).
        self.triplestore = TripleStore (uri)
    def get_broader (self, term):
        # Return descriptors broader than any item whose label matches *term*
        # (case-insensitive regex match on the item label).
        return self.triplestore.query_template (
            inputs={ "term" : term, "prefixes" : self.get_prefixes () },
            outputs= [ "obj", "name" ],
            template_text="""
            $prefixes
            SELECT DISTINCT ?obj ?name ?itemName FROM <http://id.nlm.nih.gov/mesh>
            WHERE {
              ?item meshv:broaderDescriptor ?obj ;
                    rdfs:label ?itemName.
              ?obj rdfs:label ?name .
              filter (regex(lcase(str(?itemName)), lcase(str("$term"))))
            }
            ORDER BY ?p
            """)
    # NOTE(review): the string below is a dead expression statement -- an
    # alternate exact-match query kept for reference; it is never executed.
    """
    SELECT DISTINCT ?obj ?name FROM <http://id.nlm.nih.gov/mesh>
    WHERE {
      $term meshv:broaderDescriptor ?obj .
      ?obj rdfs:label ?name .
    }
    ORDER BY ?p
    """
    def get_prefixes (self):
        # Common prefix preamble substituted into query templates above.
        return """
        PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
        PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
        PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
        PREFIX owl: <http://www.w3.org/2002/07/owl#>
        PREFIX meshv: <http://id.nlm.nih.gov/mesh/vocab#>
        PREFIX mesh: <http://id.nlm.nih.gov/mesh/>
        PREFIX mesh2015: <http://id.nlm.nih.gov/mesh/2015/>
        PREFIX mesh2016: <http://id.nlm.nih.gov/mesh/2016/>
        PREFIX mesh2017: <http://id.nlm.nih.gov/mesh/2017/>"""
class TestMeSH(unittest.TestCase):
    # NOTE(review): created at class-definition time, so merely importing this
    # module contacts the live MeSH SPARQL endpoint.
    m = MeSH ()
    def test_get_broader (self):
        """Smoke test: print broader descriptors for "Asthma"."""
        pprint (self.m.get_broader ("Asthma")) #"mesh:D001249"))
if __name__ == '__main__':
    # Run the (network-dependent) test above when executed directly.
    unittest.main ()
|
from fabric.api import env
from fabric.api import run
from fabric.api import sudo
from flask import current_app
from multistack.log import LogStream
from multistack.constants import *
class Remote:
    """
    A class to handle ssh communications with remote systems.
    """
    def __init__(self, address, user, key_location):
        """
        Remote class initialization function.
        @param address: IP address or the remote host
        @type address: C{str}
        @param user: Username to be used for authentication
        @type user: C{str}
        @param key_location: Path of the private key_location
        @type key_location: C{str}
        """
        self.address = address
        self.user = user
        self.key = key_location
        self.logstream = LogStream()
        self.logstream.add_logger(current_app.logger)

    def _prepare_env(self):
        """
        Point fabric's global env at this remote host.
        Shared by run() and sudo(), which previously duplicated this setup.
        """
        env.host_string = self.address
        env.key_filename = self.key
        env.user = self.user
        env.disable_known_hosts = True
        env.connection_attempts = SSH_ATTEMPTS
        env.timeout = SSH_TIMEOUT

    def run(self, command):
        """
        Execute a command on the remote host as self.user
        @param command: Command to be executed
        @type command: C{str}
        """
        self._prepare_env()
        return run(command, stdout = self.logstream, stderr = self.logstream)

    def sudo(self, command, user=None, pty=False):
        """
        Executes command on the remote host using sudo as user
        @param command: Command to be executed
        @type command: C{str}
        @param user: User on whose behalf the command will be executed
        @type user: C{str}
        @param pty: Whether to request a pseudo-terminal for the command
        @type pty: C{bool}
        """
        self._prepare_env()
        # Bug fix: pty was hard-coded to False here, silently ignoring the
        # caller's pty argument.
        return sudo(command, user=user, pty=pty, stdout = self.logstream, stderr = self.logstream)
|
import os
from hashlib import md5
from time import time
import jwt
from flask import current_app as junior_app
from flask import flash, redirect, session, url_for
from flask_bcrypt import check_password_hash, generate_password_hash
from src.extensions import db
class User(db.Model): # noqa: WPS230
    """Application user account (SQLAlchemy model for table ``users``)."""
    __tablename__ = 'users'
    __table_args__ = {'extend_existing': True}
    def __init__( # noqa: S107 WPS211
        self,
        login: str,
        email: str,
        password: str = '',
        firstname: str = '',
        middlename: str = '',
        lastname: str = '',
        image: str = '',
        gravatar: str = '',
        github_id: str = None,
        yandex_id: str = None,
        is_oauth: bool = False,
        is_superuser: bool = False,
        is_aproved: bool = False,
    ):
        self.login = login
        self.password = password
        self.email = email
        self.firstname = firstname
        self.middlename = middlename
        self.lastname = lastname
        self.image = image
        self.gravatar = gravatar
        self.github_id = github_id
        self.yandex_id = yandex_id
        self.is_oauth = is_oauth
        self.is_superuser = is_superuser
        self.is_aproved = is_aproved
    id = db.Column(db.Integer, primary_key=True) # noqa: A003
    login = db.Column(db.String(), unique=True)
    password = db.Column(db.String())
    email = db.Column(db.String(), unique=True)
    firstname = db.Column(db.String(), nullable=True)
    middlename = db.Column(db.String(), nullable=True)
    lastname = db.Column(db.String(), nullable=True)
    # NOTE(review): default=email binds the Column *object* defined above, not
    # a per-row email value -- likely unintended; verify against migrations.
    image = db.Column(db.String(), default=email, nullable=True)
    gravatar = db.Column(
        db.Enum('gravatar', 'face', ''),
        default='gravatar',
        name='avatar_type',
        nullable=True,
    )
    github_id = db.Column(db.String(), nullable=True)
    yandex_id = db.Column(db.String(), nullable=True)
    is_oauth = db.Column(db.Boolean, default=False, nullable=False)
    is_superuser = db.Column(db.Boolean, default=False, nullable=False)
    is_aproved = db.Column(db.Boolean, default=False, nullable=True)
    # NOTE(review): bare relationship expression -- its result is discarded,
    # so it has no effect; confirm whether it should be assigned to a name.
    db.relationship( # noqa: WPS604
        'User', backref='users', lazy='dynamic',
    )
    question_relation = db.relationship(
        'TestQuestionUserRelation',
        back_populates='user',
    )
    answer_relation = db.relationship(
        'AnswerUsersRelations',
        back_populates='user',
    )
    answers = db.relationship('Answer', back_populates='users', uselist=False)
    def __str__(self):
        return '{0} <id {1}>'.format(self.login, self.id)
    def avatar(self, size):
        # If no image string is set, fall back to the email and persist it
        # back to the DB (the image column now holds the email).
        if self.image is None:
            image_str = self.email
            User.query.filter_by(id=session['auth'].user.id).update({'image': self.email})
            db.session.commit()
        else:
            image_str = self.image
        if not self.gravatar or self.gravatar == 'gravatar':
            digest = md5(image_str.encode('utf-8')).hexdigest()
            image_str = f'{os.getenv("GRAVATAR_API")}{digest}?d=identicon&s={size}'
        if self.gravatar == 'face':
            image_str = f'{os.getenv("FACE_API")}{size}/{self.image}.png'
        return image_str
    def __repr__(self):
        return '<id {0}>'.format(self.id)
    @classmethod
    def hash_password(cls, password: str):
        """Return a bcrypt hash of *password*."""
        return generate_password_hash(password=password)
    def check_password(self, password):
        """Return True iff *password* matches the stored bcrypt hash."""
        if not self.password:
            return False
        return check_password_hash(self.password, password)
    def get_token_for_mail_aproved(self, expires_in=600):
        """Generate a token used to confirm user registration via email.

        expires_in -- token lifetime in seconds.
        """
        # NOTE(review): .decode() implies PyJWT < 2.0 (which returned bytes);
        # PyJWT >= 2.0 returns str and this call would raise -- confirm pin.
        return jwt.encode(
            {'user_id': self.id, 'exp': time() + expires_in},
            junior_app.config['SECRET_KEY'],
            algorithm='HS256').decode('utf-8')
    def get_oauth_dict(self):
        """Return a dict of OAuth backends linked to this account.

        Backends that are not linked map to a falsy value.
        """
        backends = dict()
        user_data = self.__dict__
        for backend in junior_app.config['OAUTH_BACKEND']:
            backends[backend] = user_data.get(f'{backend}_id', False)
        return backends
    @classmethod
    def verify_token_for_mail_aproved(cls, token):
        """Validate an email-confirmation token and approve the user.

        Returns the approved User, a redirect on expiry, or None if lookup fails.
        """
        try:
            user_id = jwt.decode(
                token,
                junior_app.config['SECRET_KEY'],
                algorithms=['HS256'],
            )['user_id']
        except jwt.ExpiredSignatureError:
            flash('Ссылка устарела')
            return redirect(url_for('auth.login'))
        try:
            user = User.query.get(user_id)
        # NOTE(review): SQLAlchemy models have no DoesNotExist attribute --
        # this Django-style clause would itself raise AttributeError; verify.
        except User.DoesNotExist():
            return None
        # NOTE(review): flag is set in memory only; no db.session.commit() here.
        user.is_aproved = True
        return user
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from zope.interface import implements
from buildbot import interfaces
from buildbot.status.buildrequest import BuildRequestStatus
class BuildSetStatus:
    """Read-only status view over a buildset dictionary."""
    implements(interfaces.IBuildSetStatus)

    def __init__(self, bsdict, status):
        self.id = bsdict['bsid']
        self.bsdict = bsdict
        self.status = status
        self.master = status.master

    # methods for our clients

    def getReason(self):
        """Why this buildset was scheduled."""
        return self.bsdict['reason']

    def getResults(self):
        """Overall result code of the buildset."""
        return self.bsdict['results']

    def getID(self):
        """External id string supplied by the requester."""
        return self.bsdict['external_idstring']

    def isFinished(self):
        """Whether every build request in the set has completed."""
        return self.bsdict['complete']

    def getBuilderNamesAndBuildRequests(self):
        # returns a Deferred; undocumented method that may be removed
        # without warning
        d = self.master.db.buildrequests.getBuildRequests(bsid=self.id)

        def to_status_map(brdicts):
            mapping = {}
            for brd in brdicts:
                builder = brd['buildername']
                mapping[builder] = BuildRequestStatus(
                    builder, brd['brid'], self.status)
            return mapping

        d.addCallback(to_status_map)
        return d

    def getBuilderNames(self):
        """Deferred -> sorted list of builder names in this buildset."""
        d = self.master.db.buildrequests.getBuildRequests(bsid=self.id)

        def collect_names(brdicts):
            names = [brd['buildername'] for brd in brdicts]
            names.sort()
            return names

        d.addCallback(collect_names)
        return d

    def waitUntilFinished(self):
        """Deferred that fires when the whole buildset completes."""
        return self.status._buildset_waitUntilFinished(self.id)
|
"""
Docker Compose Support
======================
Allows to spin up services configured via :code:`docker-compose.yml`.
"""
import requests
import subprocess
from testcontainers.core.waiting_utils import wait_container_is_ready
from testcontainers.core.exceptions import NoSuchPortExposed
class DockerCompose(object):
    """
    Manage docker compose environments.
    Parameters
    ----------
    filepath: str
        The relative directory containing the docker compose configuration file
    compose_file_name: str
        The file name of the docker compose configuration file
    pull: bool
        Attempts to pull images before launching environment
    build: bool
        Whether to build images referenced in the configuration file
    env_file: str
        Path to an env file containing environment variables to pass to docker compose
    Example
    -------
    ::
        with DockerCompose("/home/project",
                           compose_file_name=["docker-compose-1.yml", "docker-compose-2.yml"],
                           pull=True) as compose:
            host = compose.get_service_host("hub", 4444)
            port = compose.get_service_port("hub", 4444)
            driver = webdriver.Remote(
                command_executor=("http://{}:{}/wd/hub".format(host,port)),
                desired_capabilities=CHROME,
            )
            driver.get("http://automation-remarks.com")
            stdout, stderr = compose.get_logs()
            if stderr:
                print("Errors\\n:{}".format(stderr))
    .. code-block:: yaml
        hub:
            image: selenium/hub
            ports:
            - "4444:4444"
        firefox:
            image: selenium/node-firefox
            links:
            - hub
            expose:
            - "5555"
        chrome:
            image: selenium/node-chrome
            links:
            - hub
            expose:
            - "5555"
    """
    def __init__(
            self,
            filepath,
            compose_file_name="docker-compose.yml",
            pull=False,
            build=False,
            env_file=None):
        self.filepath = filepath
        # Normalize to a list so one or many compose files are handled alike.
        self.compose_file_names = compose_file_name if isinstance(
            compose_file_name, (list, tuple)
        ) else [compose_file_name]
        self.pull = pull
        self.build = build
        self.env_file = env_file
    def __enter__(self):
        # Context manager protocol: bring the environment up on entry...
        self.start()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # ...and tear it down (including volumes) on exit.
        self.stop()
    def docker_compose_command(self):
        """
        Returns command parts used for the docker compose commands
        Returns
        -------
        list[str]
            The docker compose command parts
        """
        # NOTE(review): assumes the standalone v1 `docker-compose` binary is
        # on PATH (not the `docker compose` v2 plugin) -- confirm environment.
        docker_compose_cmd = ['docker-compose']
        for file in self.compose_file_names:
            docker_compose_cmd += ['-f', file]
        if self.env_file:
            docker_compose_cmd += ['--env-file', self.env_file]
        return docker_compose_cmd
    def start(self):
        """
        Starts the docker compose environment.
        """
        if self.pull:
            pull_cmd = self.docker_compose_command() + ['pull']
            self._call_command(cmd=pull_cmd)
        up_cmd = self.docker_compose_command() + ['up', '-d']
        if self.build:
            up_cmd.append('--build')
        self._call_command(cmd=up_cmd)
    def stop(self):
        """
        Stops the docker compose environment.
        """
        # `-v` also removes the named volumes declared by the compose file.
        down_cmd = self.docker_compose_command() + ['down', '-v']
        self._call_command(cmd=down_cmd)
    def get_logs(self):
        """
        Returns all log output from stdout and stderr
        Returns
        -------
        tuple[bytes, bytes]
            stdout, stderr
        """
        logs_cmd = self.docker_compose_command() + ["logs"]
        result = subprocess.run(
            logs_cmd,
            cwd=self.filepath,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        return result.stdout, result.stderr
    def exec_in_container(self, service_name, command):
        """
        Executes a command in the container of one of the services.
        Parameters
        ----------
        service_name: str
            Name of the docker compose service to run the command in
        command: list[str]
            The command to execute
        Returns
        -------
        tuple[str, str, int]
            stdout, stderr, return code
        """
        # `-T` disables pseudo-tty allocation so output can be captured.
        exec_cmd = self.docker_compose_command() + ['exec', '-T', service_name] + command
        result = subprocess.run(
            exec_cmd,
            cwd=self.filepath,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        return result.stdout.decode("utf-8"), result.stderr.decode("utf-8"), result.returncode
    def get_service_port(self, service_name, port):
        """
        Returns the mapped port for one of the services.
        Parameters
        ----------
        service_name: str
            Name of the docker compose service
        port: int
            The internal port to get the mapping for
        Returns
        -------
        str:
            The mapped port on the host
        """
        return self._get_service_info(service_name, port)[1]
    def get_service_host(self, service_name, port):
        """
        Returns the host for one of the services.
        Parameters
        ----------
        service_name: str
            Name of the docker compose service
        port: int
            The internal port to get the host for
        Returns
        -------
        str:
            The hostname for the service
        """
        return self._get_service_info(service_name, port)[0]
    def _get_service_info(self, service, port):
        # Parse `docker-compose port` output of the form "host:port".
        # NOTE(review): splitting on ":" would misparse IPv6 host addresses.
        port_cmd = self.docker_compose_command() + ["port", service, str(port)]
        output = subprocess.check_output(port_cmd, cwd=self.filepath).decode("utf-8")
        result = str(output).rstrip().split(":")
        if len(result) == 1:
            raise NoSuchPortExposed("Port {} was not exposed for service {}"
                                    .format(port, service))
        return result
    def _call_command(self, cmd, filepath=None):
        # Fire-and-forget helper; the exit status of `cmd` is ignored.
        if filepath is None:
            filepath = self.filepath
        subprocess.call(cmd, cwd=filepath)
    @wait_container_is_ready(requests.exceptions.ConnectionError)
    def wait_for(self, url):
        """
        Waits for a response from a given URL. This is typically used to
        block until a service in the environment has started and is responding.
        Note that it does not assert any sort of return code, only check that
        the connection was successful.
        Parameters
        ----------
        url: str
            URL from one of the services in the environment to use to wait on
        """
        requests.get(url)
        return self
|
#!/usr/bin/env python3
"""*.h5 を読んでONNXなどに変換するスクリプト。"""
import argparse
import os
import pathlib
import sys
import tensorflow as tf
try:
import pytoolkit as tk
except ImportError:
sys.path.insert(0, str(pathlib.Path(__file__).resolve().parent.parent.parent))
import pytoolkit as tk
logger = tk.log.get(__name__)
def main():
    """Load a Keras model and convert it to the requested format
    (hdf5 / saved_model / onnx / tflite), saving next to the input file."""
    tk.utils.better_exceptions()
    tk.log.init(None)
    parser = argparse.ArgumentParser(
        description="hdf5/saved_model を読んでONNXなどに変換するスクリプト。"
    )
    parser.add_argument(
        "mode", choices=("hdf5", "saved_model", "onnx", "tflite"), help="変換先の形式"
    )
    parser.add_argument("model_path", type=pathlib.Path, help="対象ファイルのパス(*.h5)")
    args = parser.parse_args()
    # Conversion needs no GPU; force inference mode.
    os.environ["CUDA_VISIBLE_DEVICES"] = "none"
    tf.keras.backend.set_learning_phase(0)
    logger.info(f"{args.model_path} Loading...")
    model = tk.models.load(args.model_path)
    if args.mode == "hdf5":
        # Bug fix: pathlib requires the leading dot -- with_suffix("h5")
        # raises ValueError("Invalid suffix").
        save_path = args.model_path.with_suffix(".h5")
    elif args.mode == "saved_model":
        save_path = args.model_path.with_suffix("")
    elif args.mode == "onnx":
        save_path = args.model_path.with_suffix(".onnx")
    elif args.mode == "tflite":
        save_path = args.model_path.with_suffix(".tflite")
    else:
        raise ValueError(f"Invalid mode: {args.mode}")
    logger.info(f"{save_path} Saving...")
    tk.models.save(model, save_path, mode=args.mode)
    logger.info("Finished!")
if __name__ == "__main__":
    main()
|
import sys
sys.path.append("../../common")
import itertools
from env_indigo import *
def searchSim(bingo, q, minSim, maxSim, metric=None):
    """Run a similarity search over the bingo DB and print each hit's
    id together with the query and hit SMILES."""
    print("** searchSim({0}, {1}, {2}, {3}) **".format(q.smiles(), minSim, maxSim, metric))
    result = bingo.searchSim(q, minSim, maxSim, metric)
    query_obj = result.getIndigoObject()
    while result.next():
        try:
            record = bingo.getRecordById(result.getCurrentId())
            print("\t{0} {1} {2}".format(
                result.getCurrentId(), query_obj.smiles(), record.smiles()))
        except BingoException as e:
            print("BingoException: {0}".format(getIndigoExceptionText(e)))
    result.close()
def searchSub(bingo, q, options=""):
    """Run a substructure search over the bingo DB and print each hit."""
    print("** searchSub({0}, {1}) **".format(q.smiles(), repr(options)))
    result = bingo.searchSub(q, options)
    query_obj = result.getIndigoObject()
    while result.next():
        try:
            record = bingo.getRecordById(result.getCurrentId())
            print("\t{0} {1} {2}".format(
                result.getCurrentId(), query_obj.smiles(), record.smiles()))
        except BingoException as e:
            print("\tBingoException: {0}".format(getIndigoExceptionText(e)))
    result.close()
def searchExact(bingo, q, options=""):
    """Run an exact-match search and print each hit; asserts that at
    least one record was found."""
    print("** searchExact({0}, {1}) **".format(q.smiles(), repr(options)))
    result = bingo.searchExact(q, options)
    query_obj = result.getIndigoObject()
    found = False
    while result.next():
        try:
            record = bingo.getRecordById(result.getCurrentId())
            print("\t{0} {1} {2}".format(
                result.getCurrentId(), query_obj.smiles(), record.smiles()))
            found = True
        except BingoException as e:
            print("\tBingoException: {0}".format(getIndigoExceptionText(e)))
    result.close()
    assert found
indigo = Indigo()
# Create (or overwrite) a file-based molecule database next to this test.
bingo = Bingo.createDatabaseFile(
    indigo, joinPathPy("db_molecule", __file__), "molecule", ""
)
index = 0
wrongStructures = 0
mols = []
# Index every structure from the SDF; structures Indigo rejects are
# counted and skipped rather than aborting the run.
for mol in indigo.iterateSDFile(
    joinPathPy("molecules/bingo_mols.sdf", __file__)
):
    try:
        bingo.insert(mol, index)
        mols.append(mol)
    except BingoException as e:
        print(
            "Structure {0} excluded: {1}".format(
                index, getIndigoExceptionText(e)
            )
        )
        wrongStructures += 1
    index += 1
    # Progress marker every 1000 structures.
    if not (index % 1000):
        print("Processed {0} structures...".format(index))
print(
    "Finished indexing {0} structures. {1} wrong structures excluded".format(
        index, wrongStructures
    )
)
# Exercise substructure and three similarity metrics with each query molecule.
for mol in indigo.iterateSDFile(
    joinPathPy("molecules/rand_queries_small.sdf", __file__)
):
    qmol = indigo.loadQueryMolecule(mol.rawData())
    searchSub(bingo, qmol)
    searchSim(bingo, mol, 0.9, 1, "tanimoto")
    searchSim(bingo, mol, 0.9, 1, "tversky 0.3 0.7")
    searchSim(bingo, mol, 0.9, 1, "euclid-sub")
# Exact search on the first 100 successfully indexed molecules.
for mol in itertools.islice(mols, 100):
    searchExact(bingo, mol)
bingo.close()
|
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import utool as ut
import vtool as vt
import itertools as it
from six.moves import range, zip, map # NOQA
(print, rrr, profile) = ut.inject2(__name__)
# DEBUG_REQUERY = True
DEBUG_REQUERY = False
class FinalResults(ut.NiceRepr):
    """Fixed-size output buffers for a requery: neighbor indexes,
    distances, and the k at which each neighbor was found."""

    def __init__(self, shape):
        # -1 / NaN mark slots that have not been assigned yet.
        self.qfx2_idx = np.full(shape, -1, dtype=np.int32)
        self.qfx2_dist = np.full(shape, np.nan, dtype=np.float64)
        self.qfx2_truek = np.full(shape, -1, dtype=np.int32)

    def assign(self, index, idxs, dists, trueks):
        """Write results into the rows selected by *index*."""
        self.qfx2_idx[index, :] = idxs
        self.qfx2_dist[index, :] = dists
        self.qfx2_truek[index, :] = trueks

    def __nice__(self):
        return str(self.qfx2_idx)
class TempQuery(ut.NiceRepr):
    """Bookkeeping for query vectors that still need more neighbors."""

    def __init__(self, vecs, invalid_axs, get_neighbors, get_axs):
        # Static attributes
        self.invalid_axs = invalid_axs
        self.get_neighbors = get_neighbors
        self.get_axs = get_axs
        # Dynamic attributes (these shrink as query rows complete)
        self.index = np.arange(len(vecs))
        self.vecs = vecs

    def __nice__(self):
        return str(self.index)

    def neighbors(self, temp_K):
        """Fetch temp_K candidates per row and flag the invalid ones."""
        raw_idxs, raw_dists = self.get_neighbors(self.vecs, temp_K)
        idxs = vt.atleast_nd(raw_idxs, 2)
        dists = vt.atleast_nd(raw_dists, 2)
        # A neighbor is valid when its annotation is not in the invalid set.
        validflags = ~in1d_shape(self.get_axs(idxs), self.invalid_axs)
        return TempResults(self.index, idxs, dists, validflags)

    def compress_inplace(self, flags):
        """Keep only the rows where *flags* is True."""
        self.index = self.index.compress(flags, axis=0)
        self.vecs = self.vecs.compress(flags, axis=0)
class TempResults(ut.NiceRepr):
    """Candidate neighbor results for the still-incomplete query rows."""

    def __init__(cand, index, idxs, dists, validflags):
        cand.index = index            # original query-row indices
        cand.idxs = idxs              # candidate neighbor indices per row
        cand.dists = dists            # corresponding distances
        cand.validflags = validflags  # True where the neighbor is allowed

    def __nice__(cand):
        return str(cand.index)

    def compress(cand, flags):
        """Return a new TempResults restricted to rows where *flags* is True."""
        qfx = cand.index.compress(flags, axis=0)
        idx_ = cand.idxs.compress(flags, axis=0)
        dist_ = cand.dists.compress(flags, axis=0)
        valid_ = cand.validflags.compress(flags, axis=0)
        return TempResults(qfx, idx_, dist_, valid_)

    def done_flags(cand, num_neighbs):
        """Rows that already found at least *num_neighbs* valid neighbors."""
        return cand.validflags.sum(axis=1) >= num_neighbs

    def done_part(cand, num_neighbs):
        """Extract the first *num_neighbs* valid neighbors of each row.

        Returns (idxs, dists, trueks), each shaped (n_rows, num_neighbs).
        """
        # Find the first `num_neighbs` complete columns in each row
        rowxs, colxs = np.where(cand.validflags)
        unique_rows, groupxs = vt.group_indices(rowxs, assume_sorted=True)
        first_k_groupxs = [groupx[0:num_neighbs] for groupx in groupxs]
        if DEBUG_REQUERY:
            assert all(ut.issorted(groupx) for groupx in groupxs)
            assert all([len(group) == num_neighbs for group in first_k_groupxs])
        # Bug fix: `np.int` was removed in NumPy 1.24; the builtin int is what
        # the old alias meant.
        chosen_xs = np.array(ut.flatten(first_k_groupxs), dtype=int)
        # chosen_xs = np.hstack(first_k_groupxs)
        # then convert these to multi-indices
        done_rows = rowxs.take(chosen_xs)
        done_cols = colxs.take(chosen_xs)
        multi_index = (done_rows, done_cols)
        # done_shape = (cand.validflags.shape[0], num_neighbs)
        # flat_xs = np.ravel_multi_index(multi_index, done_shape)
        flat_xs = np.ravel_multi_index(multi_index, cand.idxs.shape)
        _shape = (-1, num_neighbs)
        idxs = cand.idxs.take(flat_xs).reshape(_shape)
        dists = cand.dists.take(flat_xs).reshape(_shape)
        trueks = colxs.take(chosen_xs).reshape(_shape)
        if DEBUG_REQUERY:
            # dists2 = dists.copy()
            for count, (row, cols) in enumerate(zip(unique_rows, groupxs)):
                pass
            assert np.all(np.diff(dists, axis=1) >= 0)
            valid = cand.validflags.take(flat_xs).reshape(_shape)
            assert np.all(valid)
        return idxs, dists, trueks
def in1d_shape(arr1, arr2):
    """Elementwise membership of *arr1*'s values in *arr2*, preserving
    arr1's shape.

    Uses np.isin: np.in1d is deprecated (NumPy 2.0) and flattens its input,
    requiring the manual reshape the old implementation did.
    """
    return np.isin(arr1, arr2)
def requery_knn(get_neighbors, get_axs, qfx2_vec, num_neighbs, invalid_axs=[],
                pad=2, limit=4, recover=True):
    """
    Searches for `num_neighbs`, while ignoring certain matches. K is
    increased until enough valid neighbors are found or a limit is reached.
    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.algo.hots.neighbor_index import *  # NOQA
        >>> import ibeis
        >>> qreq_ = ibeis.testdata_qreq_(defaultdb='testdb1', a='default')
        >>> qreq_.load_indexer()
        >>> indexer = qreq_.indexer
        >>> qannot = qreq_.internal_qannots[1]
        >>> qfx2_vec = qannot.vecs
        >>> ibs = qreq_.ibs
        >>> qaid = qannot.aid
        >>> impossible_aids = ibs.get_annot_groundtruth(qaid, noself=False)
        >>> invalid_axs = np.array(ut.take(indexer.aid2_ax, impossible_aids))
        >>> pad = 0
        >>> limit = 1
        >>> num_neighbs = 3
        >>> def get_neighbors(vecs, temp_K):
        >>>     return indexer.flann.nn_index(vecs, temp_K, checks=indexer.checks,
        >>>                                   cores=indexer.cores)
        >>> get_axs = indexer.get_nn_axs
        >>> res = requery_knn(
        >>>     get_neighbors, get_axs, qfx2_vec, num_neighbs, invalid_axs, pad,
        >>>     limit, recover=True)
        >>> qfx2_idx, qfx2_dist = res
        >>> assert np.all(np.diff(qfx2_dist, axis=1) >= 0)
    Ignore:
        >>> from ibeis.algo.hots.neighbor_index import *  # NOQA
        >>> from ibeis.algo.hots.requery_knn import *  # NOQA
        >>> max_k = 9
        >>> n_pts = 5
        >>> num_neighbs = 3
        >>> temp_K = num_neighbs * 2
        >>> #
        >>> # Create dummy data
        >>> rng = np.random.RandomState(0)
        >>> tx2_idx_full = rng.randint(0, 10, size=(n_pts, max_k))
        >>> tx2_idx_full[:, 0] = 0
        >>> tx2_dist_full = np.meshgrid(np.arange(max_k), np.arange(n_pts))[0] / 10
        >>> tx2_dist_full += (rng.rand(n_pts, max_k) * 10).astype(np.int) / 100
        >>> qfx2_vec = np.arange(n_pts)[:, None]
        >>> vecs = qfx2_vec
        >>> #
        >>> pad = 0
        >>> limit = 1
        >>> recover = True
        >>> #
        >>> invalid_axs = np.array([0, 1, 2, 5, 7, 9])
        >>> get_axs = ut.identity
        >>> #
        >>> def get_neighbors(vecs, temp_K):
        >>>     # simulates finding k nearest neighbors
        >>>     idxs = tx2_idx_full[vecs.ravel(), 0:temp_K]
        >>>     dists = tx2_dist_full[vecs.ravel(), 0:temp_K]
        >>>     return idxs, dists
        >>> #
        >>> res = requery_knn(
        >>>     get_neighbors, get_axs, qfx2_vec, num_neighbs, invalid_axs, pad,
        >>>     limit, recover=True)
        >>> qfx2_idx, qfx2_dist = res
    """
    # Alloc space for final results
    shape = (len(qfx2_vec), num_neighbs)
    final = FinalResults(shape)  # NOQA
    query = TempQuery(qfx2_vec, invalid_axs, get_neighbors, get_axs)
    # Start with a small over-allocation (pad) above the requested K.
    temp_K = num_neighbs + pad
    assert limit > 0, 'must have at least one iteration'
    at_limit = False
    # Iteratively re-query with larger K until every row has enough valid
    # neighbors, or the iteration limit is hit.
    for count in it.count():
        # print('count = %r' % (count,))
        cand = query.neighbors(temp_K)
        # Find which query features have found enough neighbors
        done_flags = cand.done_flags(num_neighbs)
        if DEBUG_REQUERY:
            print('count = %r' % (count,))
            assert np.all(np.diff(cand.dists, axis=1) >= 0)
            print('done_flags = %r' % (done_flags,))
        # Move any done queries into results and compress the query
        if np.any(done_flags):
            # Get the valid part of the results
            done = cand.compress(done_flags)
            idxs, dists, trueks = done.done_part(num_neighbs)
            final.assign(done.index, idxs, dists, trueks)
            if DEBUG_REQUERY:
                assert np.all(np.diff(dists, axis=1) >= 0)
                blocks = final.qfx2_dist
                nanelem_flags = np.isnan(blocks)
                nanrow_flags = np.any(nanelem_flags, axis=1)
                assert np.all(nanelem_flags.sum(axis=1)[nanrow_flags] == num_neighbs)
                assert np.all(np.diff(blocks[~nanrow_flags], axis=1) >= 0)
                print('final.qfx2_dist')
                print(final.qfx2_dist)
            if np.all(done_flags):
                # If everything was found then we are done
                break
            else:
                # Continue query with remaining invalid results
                query.compress_inplace(~done_flags)
        # double the search space
        temp_K *= 2
        at_limit = limit is not None and count >= limit
        if at_limit:
            # NOTE(review): interactive debugger hook left in -- fires only
            # when the limit is hit with zero rows pending.
            if len(done_flags) == 0:
                import utool
                utool.embed()
            print('[knn] Hit limit=%r and found %d/%d' % (
                limit, sum(done_flags), len(done_flags)))
            break
    if at_limit and recover:
        # If over the limit, then we need to do the best with what we have
        # otherwise we would just return nan
        best = cand.compress(~done_flags)
        print('[knn] Recover for %d features' % (len(best.index)))
        # Simply override the last indices to be valid and use those
        best.validflags[:, -num_neighbs:] = True
        # Now we can find a valid part
        idxs, dists, trueks = best.done_part(num_neighbs)
        final.assign(best.index, idxs, dists, trueks)
        if DEBUG_REQUERY:
            print('final.qfx2_dist')
            print(final.qfx2_dist)
    return final.qfx2_idx, final.qfx2_dist
|
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from models import resnet18
from models import inception_v3
from models import vgg16_bn
import torch
def filter_state_dict(state_dict):
    """Normalize a checkpoint state dict: unwrap a nested 'state_dict'
    entry, drop 'sub_block' keys, and strip DataParallel 'module.' prefixes."""
    from collections import OrderedDict

    if 'state_dict' in state_dict.keys():
        state_dict = state_dict['state_dict']
    cleaned = OrderedDict()
    for key, value in state_dict.items():
        if 'sub_block' in key:
            continue
        target_key = key[7:] if 'module' in key else key
        cleaned[target_key] = value
    return cleaned
def load_w(model, path):
    """Load weights from `path` into `model`, mapping tensors onto the
    device where the model's parameters already live."""
    target_device = next(model.parameters()).device
    state = torch.load(path, map_location=target_device)
    model.load_state_dict(state)
class Cifar10Renormalize(torch.nn.Module):
    """Apply per-channel CIFAR-10 normalization to the input, then delegate
    to the wrapped module."""

    def __init__(self, wrap):
        super().__init__()
        self.wrap = wrap

    def forward(self, x):
        # Per-channel mean/std, broadcast over (N, C, H, W).
        mean = x.new_tensor([0.4914, 0.4822, 0.4465]).view(1, 3, 1, 1)
        std = x.new_tensor([0.2023, 0.1994, 0.201]).view(1, 3, 1, 1)
        return self.wrap((x - mean) / std)
class ImageNetRenormalize(torch.nn.Module):
    """Rescale inputs from [0, 1] to [-1, 1], resize to 384 with bicubic
    interpolation, then delegate to the wrapped module."""

    def __init__(self, wrap):
        super().__init__()
        self.wrap = wrap
        self.sizer = transforms.Resize(
            384, interpolation=InterpolationMode.BICUBIC)

    def forward(self, x):
        rescaled = (x - 0.5) / 0.5
        resized = self.sizer(rescaled)
        return self.wrap(resized)
def get_model_for_attack(model_name):
    """Resolve a model identifier to a torch model ready for attack evaluation.

    Fixed names: model_vgg16bn, model_resnet18, model_inceptionv3, model_vitb.
    Prefixed forms: "model_hub:<repo>:<name>" (torch.hub, CIFAR-10 renorm),
    "model_mnist:<key>" (entry of mnist.pt), "model_ex:<path>" (torch.load).

    Raises:
        ValueError: if model_name matches none of the known forms.
    """
    if model_name == 'model_vgg16bn':
        model = vgg16_bn(pretrained=True)
    elif model_name == 'model_resnet18':
        model = resnet18(pretrained=True)
    elif model_name == 'model_inceptionv3':
        model = inception_v3(pretrained=True)
    elif model_name == 'model_vitb':
        from mnist_vit import ViT, MegaSizer
        model = MegaSizer(ImageNetRenormalize(
            ViT('B_16_imagenet1k', pretrained=True)))
    elif model_name.startswith('model_hub:'):
        _, a, b = model_name.split(":")
        model = torch.hub.load(a, b, pretrained=True)
        model = Cifar10Renormalize(model)
    elif model_name.startswith('model_mnist:'):
        _, a = model_name.split(":")
        # NOTE(review): torch.load unpickles arbitrary data — only use with
        # trusted checkpoint files.
        model = torch.load('mnist.pt')[a]
    elif model_name.startswith('model_ex:'):
        _, a = model_name.split(":")
        model = torch.load(a)
    else:
        # Bug fix: the message previously read "Model f<name>" because a
        # stray literal "f" sat inside the f-string.
        raise ValueError(f'Model {model_name} does not exist.')
    return model
|
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import d1_common.utils.filesystem
import d1_common.utils.ulog
import django
import django.core.handlers.wsgi
import django.http
import django.utils
import django.utils.datastructures
# Point Django at the GMN settings module before any Django machinery loads.
os.environ["DJANGO_SETTINGS_MODULE"] = "d1_gmn.settings"
# Add the service folder to the search path.
sys.path.append(d1_common.utils.filesystem.abs_path("."))
sys.path.append(d1_common.utils.filesystem.abs_path(".."))
class D1WSGIRequest(django.core.handlers.wsgi.WSGIRequest):
    """Overrides the _load_post_and_files method of the standard Django WSGI handler to
    ensure that PUT message bodies are parsed the same way as a POST."""
    def _load_post_and_files(self):
        # Populates self._post and self._files
        if self.method in ("POST", "PUT"):
            if self.environ.get("CONTENT_TYPE", "").startswith("multipart"):
                self._raw_post_data = ""
                try:
                    self._post, self._files = self.parse_file_upload(
                        self.META, self.environ["wsgi.input"]
                    )
                # Bare except is deliberate here: state is reset below and the
                # original exception is then re-raised unchanged.
                except:
                    # An error occurred while parsing POST data. Since, when formatting the
                    # error, the request handler might access self.POST, set self._post
                    # and self._file to prevent attempts to parse POST data again.
                    self._post = django.http.QueryDict("")
                    self._files = django.utils.datastructures.MultiValueDict()
                    # Mark that an error occurred. This allows self.__repr__ to be
                    # explicit about it instead of simply representing an empty POST
                    self._post_parse_error = True
                    raise
            else:
                # Non-multipart body: parse as urlencoded form data.
                # NOTE(review): this branch reads self._raw_post_data, which is
                # only assigned in the multipart branch above — confirm the
                # Django base class populates it for non-multipart bodies.
                self._post, self._files = (
                    django.http.QueryDict(self._raw_post_data, encoding=self._encoding),
                    django.utils.datastructures.MultiValueDict(),
                )
        else:
            # Not POST/PUT: expose an empty POST and no files.
            self._post, self._files = (
                django.http.QueryDict("", encoding=self._encoding),
                django.utils.datastructures.MultiValueDict(),
            )
# noinspection PyClassHasNoInit
class D1WSGIHandler(django.core.handlers.wsgi.WSGIHandler):
    """WSGI handler that uses the PUT-aware request class for all requests."""
    # Swap in D1WSGIRequest so PUT bodies are parsed like POST bodies.
    request_class = D1WSGIRequest
# Initialize Django before constructing the WSGI application object.
django.setup(set_prefix=False)
# WSGI entry point used by the application server.
application = D1WSGIHandler()
|
# Copyright (C) 2018-2021 Seoul National University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Setup Crane."""
import os
import json
from glob import glob
from pathlib import Path
from setuptools import find_packages, setup, Command
from setuptools.command.build_py import build_py as org_build_py
from setuptools.command.sdist import sdist as org_sdist
from setuptools.command.develop import develop as org_develop
from subprocess import check_call, check_output, CalledProcessError
from tempfile import TemporaryDirectory
import unasync
README = Path(__file__).parent / "README.md"
# Read with an explicit encoding so the build does not depend on the
# platform's default (e.g. cp1252 on Windows would break on non-ASCII).
long_description = README.read_text(encoding="utf-8")
def git_clone(url: str, branch=None, dest=None) -> None:
    """Shallow-clone the GitHub repository `url` (an "owner/repo" path),
    optionally at `branch`, into `dest`."""
    cmd = ["git", "clone", "--depth=1"]
    if branch:
        cmd.append(f"--branch={branch}")
    cmd.append(f"https://github.com/{url}")
    if dest:
        cmd.append(dest)
    check_call(cmd)
def check_git_outputs(*args: str) -> str:
    """Run `git <args>` and return its stdout decoded as UTF-8, stripped."""
    return check_output(["git", *args]).decode("utf-8").strip()
class checkpoint(Command):
    """setuptools command that records the current git branch, commit and
    commit timestamp into crane/checkpoint.json."""

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        try:
            branch = check_git_outputs("rev-parse", "--abbrev-ref", "HEAD")
            commit = check_git_outputs("rev-list", "-1", "HEAD")
            stamp = check_git_outputs("show", "-s", "--format=%ci", commit)
            target = Path(__file__).parent / "crane/checkpoint.json"
            with open(target, "w") as fh:
                json.dump(
                    {
                        "git_branch": branch,
                        "git_commit": commit,
                        "git_timestamp": stamp,
                    },
                    fh,
                )
        except CalledProcessError:
            # Building outside a git checkout is only allowed when the caller
            # opts in via the NO_GIT_INFO environment variable.
            if "NO_GIT_INFO" not in os.environ:
                raise
class genSyncAPI(Command):
    """setuptools command that generates the synchronous client API.

    Runs unasync over every crane/**/*.py file, rewriting the async sources
    under crane/lib/aio into sync equivalents under crane/lib/sync using the
    replacement table below.
    """
    def initialize_options(self):
        pass
    def finalize_options(self):
        pass
    def run(self):
        files = glob("crane/**/*.py", recursive=True)
        unasync.unasync_files(
            files,
            rules=[
                unasync.Rule(
                    "crane/lib/aio",
                    "crane/lib/sync",
                    # Identifier substitutions applied on top of unasync's
                    # built-in async -> sync rewrites.
                    additional_replacements={
                        "aio": "sync",
                        "anext": "next",
                        "await_if_coro": "return_non_coro",
                        "aclose": "close",
                        "aiter_lines": "iter_lines",
                        "async_sleep": "sync_sleep",
                    },
                )
            ],
        )
class build_py(org_build_py):
    """build_py that regenerates the sync API and the git checkpoint first."""

    def run(self):
        for prerequisite in ("gen_sync_api", "checkpoint"):
            self.run_command(prerequisite)
        super().run()
class sdist(org_sdist):
    """sdist that regenerates the sync API and the git checkpoint first."""

    def run(self):
        for prerequisite in ("gen_sync_api", "checkpoint"):
            self.run_command(prerequisite)
        super().run()
class develop(org_develop):
    """develop that regenerates the sync API and the git checkpoint first."""

    def run(self):
        for prerequisite in ("gen_sync_api", "checkpoint"):
            self.run_command(prerequisite)
        super().run()
# Runtime dependencies shared by all install targets; versions are pinned
# to keep builds reproducible.
COMMON_DEPS = [
    "ephemeral_port_reserve==1.1.4",
    "typing_extensions",
    "termcolor==1.1.0",
    "async_timeout==3.0.1",
    "mashumaro==2.2",
    "netifaces==0.10.9",
    "protobuf==3.11.3",
    "tabulate==0.8.6",
    "pydantic==1.8.2",
    "aiofiles==0.7.0",
    "httpx==0.16.1",
    "gql==3.0.0a6",
    "typer[all]==0.3.2",
    "rich==10.9.0",
    "python-dateutil==2.8.2",
]
# Package metadata, custom build commands and entry points.
setup(
    name="crane",
    version="0.3.2",
    author="snuspl",
    author_email="yuyupopo@snu.ac.kr",
    description="A GPU Cluster Manager for DL Workloads",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/snuspl/crane",
    classifiers=[
        "Programming Language :: Python :: 3",
        # Fixed: "Apache 2.0 License" is not a valid trove classifier; PyPI
        # rejects uploads with unknown classifiers.
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
    ],
    packages=find_packages(
        exclude=(
            "tests",
            "tests.*",
        )
    ),
    # Hook the code-generation and checkpoint commands into every build path.
    cmdclass={
        "build_py": build_py,
        "sdist": sdist,
        "develop": develop,
        "checkpoint": checkpoint,
        "gen_sync_api": genSyncAPI,
    },
    entry_points={
        "console_scripts": [
            "crane = crane.cli.user:app",
        ]
    },
    include_package_data=True,
    install_requires=COMMON_DEPS,
)
|
import pytest
import graphscope.nx as nx
@pytest.mark.usefixtures("graphscope_session")
class TestVoteRankCentrality:
    """VoteRank centrality tests; both are skipped until graphscope supports
    list-valued attributes."""
    @pytest.mark.skip(reason="not support list as attribute")
    def test_voterank_centrality_1(self):
        # Small hand-built graph with two obvious hubs (0 and 7).
        G = nx.Graph()
        G.add_edges_from([(7, 8), (7, 5), (7, 9), (5, 0), (0, 1), (0, 2), (0, 3),
                          (0, 4), (1, 6), (2, 6), (3, 6), (4, 6)])
        assert [0, 7, 6] == nx.voterank(G)
    @pytest.mark.skip(reason="not support list as attribute")
    def test_voterank_centrality_2(self):
        # Classic Florentine families graph; ask for the top 4 spreaders.
        G = nx.florentine_families_graph()
        d = nx.voterank(G, 4)
        exact = ['Medici', 'Strozzi', 'Guadagni', 'Castellani']
        assert exact == d
|
from enum import Enum
# This comes from crosswire.org "Standard OSIS Codes for Bible Editions"
# noinspection SpellCheckingInspection
from typing import Dict
class Version(Enum):
    """OSIS sigla for Bible editions, per crosswire.org's "Standard OSIS
    Codes for Bible Editions". Member values are the OSIS abbreviations."""
    # Ancient Language Editions
    STEPHANUS_GNT = "Steph"
    LATIN_VULGATE = "Vul"
    LATIN_ERASMUS = "Erasmus"
    MASORETIC_TEXT = "MT"
    BIBLIA_HEBRAICA_STUTTGARTENSIA = "BHS"
    NESTLE_ALAND = "NA"
    GREEK_SEPTUAGINT = "LXX"
    # English Editions
    COMPLETE = "AAT"
    AFRO = "ABT"
    ALTERNATE_TRANSLATION = "ATB"
    AMERICAN_STANDARD = "ASV"
    AMPLIFIED = "AB"
    ANALYTICAL_LITERAL = "ALT"
    AMERICAN_SIGN_LANGUAGE = "ASL"
    AUTHORIZED = "AV"
    BARCLAY = "BAR"
    BIKER = "BB"
    # NOTE(review): name misspells "Worldwide"; renaming would break callers.
    WORLWIDE_ENGLISH = "BWE"
    CHRISTIAN_COMMUNITY = "CCB"
    COMMON = "COM"
    COVENANT = "COV"
    COMPLETE_JEWISH = "CJB"
    CONCORDANT = "CONC"
    CONTEMPORARY_ENGLISH = "CEV"
    COTTON_PATCH = "CPV"
    DARBY = "DAR"
    DOUAY_RHEIMS = "DR"
    DAVID_ROBERT_PALMER = "DRP"
    ENGLISH_MAJORITY_TEXT = "EMTV"
    EXTREME = "ENT"
    EASY_TO_READ = "ERV"
    ENGLISH_STANDARD = "ESV"
    FERRAR_FENTON = "FF"
    GODS_LIVING_WORD = "GLW"
    GODS_NEW_COVENANT = "GNC"
    GODS_WORD = "GW"
    GOOD_NEWS = "GNB"
    HOLMAN_CHRISTIAN_STANDARD = "HCSB"
    INTERNATIONAL_CHILDRENS = "ICB"
    INTERNATIONAL_STANDARD_BIBLE = "ISB"
    INTERNATIONAL_STANDARD_VERSION = "ISV"
    J_B_PHILLIPS = "JBP"
    JEWISH_NEW_TESTAMENT = "JNT"
    KING_JAMES = "KJV"
    KING_JAMES_DEFINED = "DKJB"
    KING_JAMES_II = "KJII"
    KING_JAMES_21 = "KJ21"
    KING_JAMES_2000 = "KJ2000"
    LITERAL = "LITV"
    KING_JAMES_MODERN = "MKJV"
    REVISED_AUTHORISED = "RAV"
    KING_JAMES_REVISED = "RKJV"
    # NOTE(review): name misspells "Millennium"; renaming would break callers.
    THIRD_MILLENIUM = "TMB"
    KING_JAMES_UPDATED = "UKJV"
    LIVING = "LB"
    MODERN_AMERICAN_ENGLISH_VERNACULAR = "MAEV"
    MODERN_LANGUAGE = "MLB"
    JAMES_MOFFATT = "Mof"
    NEW_AMERICAN = "NAB"
    NEW_AMERICAN_STANDARD = "NASB"
    NEW_CENTURY = "NCV"
    NEW_ENGLISH_BIBLE = "NEB"
    NEW_ENGLISH_TRANSLATION = "NET"
    NEW_EVANGELICAL = "NEvT"
    NEW_INTERNATIONAL_READERS = "NIrV"
    NEW_INTERNATIONAL = "NIV"
    NEW_JERUSALEM = "NJB"
    NEW_KING_JAMES = "NKJV"
    NEW_LIFE = "NLV"
    NEW_LIVING = "NLT"
    NEW_REVISED_STANDARD = "NRSV"
    NEW_WORLD = "NWT"
    ORIGINAL_BIBLE_PROJECT = "OBP"
    ORTHODOX_STUDY = "OSB"
    ORIGINAL_NEW_TESTAMENT = "ONT"
    POSTMODERN = "PMB"
    RECOVERY = "Rec"
    REVISED_ENGLISH = "REB"
    REVISED_STANDARD = "RSV"
    REVISED = "RV"
    SCHOCKEN = "Sch"
    SIMPLE_ENGLISH = "SEB"
    MESSAGE = "TM"
    TODAYS_ENGLISH = "TEV"
    TODAYS_NEW_INTERNATIONAL = "TNIV"
    TYNDALE = "Tyn"
    WEYMOUTH = "Wey"
    WORLD_ENGLISH = "WEB"
    CHARLES_B_WILLIAMS = "Wms"
    WESLEYS = "WNT"
    WUEST = "Wuest"
    WYCLIFFE = "Wyc"
    YES_WORD = "Yes"
    YOUNGS_LITERAL = "YLT"
    @property
    def title(self) -> str:
        """Human-readable edition title, or "" when no title is registered."""
        return _VERSION_TITLES.get(self, "")
# noinspection SpellCheckingInspection
# Human-readable titles backing Version.title; versions absent from this
# mapping yield "" from that property.
_VERSION_TITLES: Dict[Version, str] = {
    # Ancient Language Editions
    Version.STEPHANUS_GNT: "Stephanus GNT",
    Version.LATIN_VULGATE: "Latin Vulgate",
    Version.LATIN_ERASMUS: "Erasmus Latin translation by Desiderius Erasmus Roterodamus",
    Version.MASORETIC_TEXT: "Masoretic text",
    Version.BIBLIA_HEBRAICA_STUTTGARTENSIA: "Biblia Hebraica Stuttgartensia",
    Version.NESTLE_ALAND: "Nestle-Aland Greek New Testament",
    Version.GREEK_SEPTUAGINT: "Greek Septuagint",
    # English Editions
    Version.COMPLETE: "The Complete Bible: An American Translation, by Edgar Goodspeed and J. M. Powis Smith",
    Version.AFRO: "The Afro Bible Translation",
    Version.ALTERNATE_TRANSLATION: "The Alternate Translation Bible",
    Version.AMERICAN_STANDARD: "American Standard Version",
    Version.AMPLIFIED: "The Amplified Bible",
    Version.ANALYTICAL_LITERAL: "Analytical-Literal Translation",
    Version.AMERICAN_SIGN_LANGUAGE: "American Sign Language Translation",
    Version.AUTHORIZED: "Authorized Version",
    Version.BARCLAY: "The New Testament: A New Translation, by William Barclay",
    Version.BIKER: "The Biker Bible",
    Version.WORLWIDE_ENGLISH: "Bible in WorldWide English",
    Version.CHRISTIAN_COMMUNITY: "Christian Community Bible",
    Version.COMMON: "The Common Edition: New Testament",
    Version.COVENANT: "Covenant Edition New Testament",
    Version.COMPLETE_JEWISH: "Complete Jewish Bible",
    Version.CONCORDANT: "Concordant Version",
    Version.CONTEMPORARY_ENGLISH: "Contemporary English Version",
    Version.COTTON_PATCH: "Cotton Patch Version, tr. Clarence Jordan",
    Version.DARBY: "Darby",
    Version.DOUAY_RHEIMS: "Douay-Rheims",
    Version.DAVID_ROBERT_PALMER: "David Robert Palmer's translations of the gospels",
    Version.ENGLISH_MAJORITY_TEXT: "English Majority Text Version",
    Version.EXTREME: "Extreme New Testament",
    Version.EASY_TO_READ: "Easy-to-Read Version",
    Version.ENGLISH_STANDARD: "English Standard Version",
    Version.FERRAR_FENTON: "Ferrar Fenton Bible",
    Version.GODS_LIVING_WORD: "God's Living Word",
    Version.GODS_NEW_COVENANT: "God's New Covenant: A New Testament Translation, by Heinz W. Cassirer",
    Version.GODS_WORD: "God's Word",
    Version.GOOD_NEWS: "Good News Bible",
    Version.HOLMAN_CHRISTIAN_STANDARD: "Holman Christian Standard Bible",
    Version.INTERNATIONAL_CHILDRENS: "International Children's Bible",
    Version.INTERNATIONAL_STANDARD_BIBLE: "International Standard Bible",
    Version.INTERNATIONAL_STANDARD_VERSION: "The International Standard Version",
    Version.J_B_PHILLIPS: "New Testament in Modern English, by J. B. Phillips",
    Version.JEWISH_NEW_TESTAMENT: "Jewish New Testament: A Translation of the New Testament That Expresses Its Jewishness",
    Version.KING_JAMES: "King James Version",
    Version.KING_JAMES_DEFINED: "Defined King James Version",
    Version.KING_JAMES_II: "King James Version II",
    Version.KING_JAMES_21: "King James for the 21st Century",
    Version.KING_JAMES_2000: "King James 2000",
    Version.LITERAL: "The Literal Translation of the Holy Bible",
    Version.KING_JAMES_MODERN: "Modern King James Version",
    Version.REVISED_AUTHORISED: "Revised Authorised Version",
    Version.KING_JAMES_REVISED: "Revised King James New Testament",
    Version.THIRD_MILLENIUM: "The Third Millenium Bible",
    Version.KING_JAMES_UPDATED: "Updated King James Version",
    Version.LIVING: "Living Bible",
    Version.MODERN_AMERICAN_ENGLISH_VERNACULAR: "Modern American English Vernacular",
    Version.MODERN_LANGUAGE: "Modern Language Bible: New Berkeley Version",
    Version.JAMES_MOFFATT: "Bible: James Moffatt Translation",
    Version.NEW_AMERICAN: "New American Bible",
    Version.NEW_AMERICAN_STANDARD: "New American Standard Bible",
    Version.NEW_CENTURY: "New Century Version",
    Version.NEW_ENGLISH_BIBLE: "New English Bible",
    Version.NEW_ENGLISH_TRANSLATION: "New English Translation",
    Version.NEW_INTERNATIONAL_READERS: "New International Reader's Version",
    Version.NEW_INTERNATIONAL: "New International Version",
    Version.NEW_JERUSALEM: "New Jerusalem Bible",
    Version.NEW_KING_JAMES: "New King James Version",
    Version.NEW_LIFE: "New Life Version",
    Version.NEW_LIVING: "New Living Translation",
    Version.NEW_REVISED_STANDARD: "New Revised Standard Bible",
    Version.NEW_WORLD: "New World Translation",
    Version.ORIGINAL_BIBLE_PROJECT: "The Original Bible Project",
    Version.ORTHODOX_STUDY: "Orthodox Study Bible",
    Version.ORIGINAL_NEW_TESTAMENT: "The Original New Testament: The First Definitive Translation of the New Testament in 2000 Years, by Hugh Schonfield",
    Version.POSTMODERN: "Postmodern Bible - Amos",
    Version.RECOVERY: "Recovery Version",
    Version.REVISED_ENGLISH: "The Revised English Bible",
    Version.REVISED_STANDARD: "The Revised Standard Version",
    Version.REVISED: "Revised Version",
    Version.SCHOCKEN: "The Schocken Bible",
    Version.SIMPLE_ENGLISH: "The Simple English Bible",
    Version.MESSAGE: "The Message",
    Version.TODAYS_ENGLISH: "Today's English Version",
    Version.TODAYS_NEW_INTERNATIONAL: "Today's New International Version",
    Version.TYNDALE: "Tyndale",
    Version.WEYMOUTH: "Weymouth",
    Version.WORLD_ENGLISH: "World English Bible",
    Version.CHARLES_B_WILLIAMS: "The New Testament in the Language of the People",
    Version.WESLEYS: "Wesley's New Testament",
    Version.WUEST: "The New Testament (An Expanded Translation)",
    Version.WYCLIFFE: "Wycliffe",
    Version.YES_WORD: "Yes Word",
    Version.YOUNGS_LITERAL: "Young's Literal Translation of the Bible",
}
# Fallback edition used when no explicit version is requested.
DEFAULT_VERSION: Version = Version.KING_JAMES
|
import cv2
import numpy as np
import random
# Load the original image and its rotated counterpart to compare features.
image = cv2.imread('image.jpg')
image_rot = cv2.imread('image_rot.jpg')
gray= cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
gray_rot = cv2.cvtColor(image_rot,cv2.COLOR_BGR2GRAY)
# SURF keypoints + descriptors (requires opencv-contrib's xfeatures2d).
surf = cv2.xfeatures2d.SURF_create()
kp, desc = surf.detectAndCompute(gray,None)
kp_rot, desc_rot = surf.detectAndCompute(gray_rot, None)
# BFMatcher with default params
bf = cv2.BFMatcher()
matches = bf.knnMatch(desc,desc_rot, k=2)
# Apply ratio test
good = []
for m,n in matches:
    # 0.4 is stricter than Lowe's usual 0.75 ratio, keeping only very
    # distinctive matches.
    if m.distance < 0.4*n.distance:
        good.append([m])
# Shuffle so the 10 matches drawn below are a random sample rather than
# the first ones found.
random.shuffle(good)
# cv2.drawMatchesKnn expects list of lists as matches.
image_match = cv2.drawMatchesKnn(image,kp,image_rot,kp_rot,good[:10],flags=2, outImg=None)
cv2.imwrite('surf_matches.jpg',image_match)
|
from errors import CustomListTypeException
from tests.customlist_tests.base.customlist_test_base import CustomListTestBase
class CustomListExtendTests(CustomListTestBase):
    """Behavioral tests for CustomList.extend: list arguments (empty and
    non-empty), arbitrary iterables, and non-iterable rejection."""

    def test_customListExtend_whenNonEmptyAndExtendedNonEmpty_shouldExtendAndReturnList(self):
        extension = [5, 6, 7]
        lst = self.setup_list(1, 2, 3, 4)
        returned = lst.extend(extension)
        self.assertListEqual([1, 2, 3, 4, 5, 6, 7], lst.data)
        self.assertListEqual([1, 2, 3, 4, 5, 6, 7], returned)

    def test_customListExtend_whenEmptyAndExtendedNonEmpty_shouldExtendAndReturnList(self):
        extension = [5, 6, 7]
        lst = self.setup_list()
        returned = lst.extend(extension)
        self.assertListEqual([5, 6, 7], lst.data)
        self.assertListEqual([5, 6, 7], returned)

    def test_customListExtend_whenNonEmptyAndExtendedEmpty_shouldExtendAndReturnList(self):
        extension = []
        lst = self.setup_list(1, 2, 3, 4)
        returned = lst.extend(extension)
        self.assertListEqual([1, 2, 3, 4], lst.data)
        self.assertListEqual([1, 2, 3, 4], returned)

    def test_customListExtend_whenEmptyAndCustomIterable_shouldExtendAndReturnList(self):
        class OneShotIterable:
            """Iterator protocol object yielding exactly one value."""

            def __init__(self, value):
                self.exhausted = False
                self.value = value

            def __iter__(self):
                return self

            def __next__(self):
                if self.exhausted:
                    raise StopIteration()
                self.exhausted = True
                return self.value

        lst = self.setup_list()
        returned = lst.extend(OneShotIterable(1))
        self.assertListEqual([1], lst.data)
        self.assertListEqual([1], returned)

    def test_customListExtend_whenExtendedNotIterable_shouldRaise(self):
        lst = self.setup_list(1, 2, 3, 4)
        with self.assertRaises(CustomListTypeException) as context:
            lst.extend(5)
        self.assertIsNotNone(context.exception)
|
"""
Legacy CASA uses a custom MS format while CNGI uses the standard
Zarr format. These functions allow conversion between
the two as well as directly from the telescope archival science data
model (ASDM) (future growth). Note that both the MS and Zarr formats
are directories, not single files.
This package has a dependency on legacy CASA / casacore and will be
separated in the future into its own distribution, apart from the rest
of the CNGI package.
To access these functions, use your favorite variation of:
``import cngi.conversion``
"""
from .convert_ms import *
from .convert_image import *
from .convert_asdm import *
from .convert_table import *
from .describe_ms import *
from .save_ms import *
from .save_image import *
from .save_asdm import *
|
import cv2
from pylot.perception.tracking.multi_object_tracker import MultiObjectTracker
class MultiObjectCV2Tracker(MultiObjectTracker):
    """Multi-object tracker backed by OpenCV's MultiTracker with one KCF
    tracker per tracked bounding box."""
    def __init__(self, flags):
        # NOTE(review): `flags` is unused and the base-class __init__ is not
        # invoked — confirm MultiObjectTracker requires no initialization.
        self._tracker = cv2.MultiTracker_create()
    def reinitialize(self, frame, bboxes):
        # Discard all current trackers and seed a fresh MultiTracker from
        # `bboxes`, given as (xmin, xmax, ymin, ymax) corner tuples.
        self._tracker = cv2.MultiTracker_create()
        for (xmin, xmax, ymin, ymax) in bboxes:
            # Convert corners to OpenCV's (x, y, width, height) rectangle.
            bbox = (xmin, ymin, xmax - xmin, ymax - ymin)
            self._tracker.add(cv2.TrackerKCF_create(), frame, bbox)
            #self._tracker.add(cv2.TrackerMOSSE_create(), frame, bbox)
    def track(self, frame):
        # Advance every tracker on the new frame. Returns (ok, corners);
        # (False, []) when OpenCV reports a tracking failure.
        ok, bboxes = self._tracker.update(frame)
        if not ok:
            return False, []
        corners = []
        for (xmin, ymin, w, h) in bboxes:
            # Back to the (xmin, xmax, ymin, ymax) form used by reinitialize.
            corners.append((xmin, xmin + w, ymin, ymin + h))
        return True, corners
|
# Copyright 2018 Google. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Train NMT with low level API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import threading
import time
from six.moves import queue as Queue
import tensorflow.compat.v1 as tf
from REDACTED.tensorflow.contrib import cluster_resolver as contrib_cluster_resolver
from REDACTED import rewriter_config_pb2
from REDACTED.tensorflow.python.tpu import tpu
from REDACTED.tensorflow.python.tpu import tpu_feed
from REDACTED.tensorflow.python.tpu import tpu_function
from REDACTED.tensorflow.python.tpu import training_loop
from REDACTED.tensorflow.python.tpu.ops import tpu_ops
from REDACTED.tensorflow.python.data.util import nest as data_nest
from REDACTED.tensorflow.python.framework import graph_io
from REDACTED.tensorflow_models.mlperf.models.rough.mlp_log import mlp_log
from REDACTED.tensorflow_models.mlperf.models.rough.nmt import metric
from REDACTED.tensorflow_models.mlperf.models.rough.nmt.utils import iterator_utils
# Placeholder loss value used to seed the TPU training loop before the first
# real step runs.
_INITIAL_LOSS = 1e7
# Presumably producer/consumer queue control tokens — their consumers are not
# in view here (TODO confirm).
_STOP = -1
_ITEM = 1
def wrap_computation_in_while_loop(op_fn,
                                   n,
                                   host_name,
                                   include_induction_variable=False):
  """Wraps the ops generated by `op_fn` in tf.while_loop.

  The loop runs `n` iterations on `host_name`'s CPU; when
  `include_induction_variable` is set, `op_fn` receives the loop counter.
  """
  def loop_body(i):
    produced = op_fn(i) if include_induction_variable else op_fn()
    if not isinstance(produced, list):
      produced = [produced]
    # Force the generated ops to run before advancing the counter.
    with tf.control_dependencies(produced):
      return i + 1
  with tf.device(device_for_host(host_name)):
    return tf.while_loop(
        lambda i: tf.less(i, n),
        loop_body, [tf.constant(0)],
        parallel_iterations=1)
def get_resolver(hparams):
  """Return a TPUClusterResolver for the configured master or TPU name,
  or None when neither is set."""
  target = hparams.master or hparams.tpu_name
  if target:
    return contrib_cluster_resolver.TPUClusterResolver(target)
  return None
def get_host(resolver, hparams, host_id=0):
  """Return the TF job/task device prefix for host `host_id`."""
  if resolver is None:
    # No cluster: single local replica.
    return "/replica:0/task:0"
  if hparams.master == "local":
    return "/job:localhost/replica:0/task:0"
  job = resolver.get_job_name() or hparams.tpu_job_name or "tpu_worker"
  return "/job:%s/task:%d" % (job, host_id)
def device_for_host(host_name):
  """Return the CPU:0 device string under `host_name`."""
  return "%s/device:CPU:0" % host_name
def device_for_tpu_core(host_name, core=0):
  """Return the replicated TPU core device string under `host_name`."""
  return "%s/device:TPU_REPLICATED_CORE:%d" % (host_name, core)
class LowLevelRunner(object):
"""Run Train via direct session.run calls."""
def __init__(self,
hparams,
train_iterations,
eval_steps,
per_host_v1=False):
tf.logging.info("TrainLowLevelRunner: constructor")
self.feature_structure = {}
self.eval_feature_structure = {}
self.loss = None
self.infeed_queue = []
self.eval_infeed_queue = []
self.enqueue_ops = []
self.eval_enqueue_ops = []
self.dataset_initializer = []
self.eval_dataset_initializer = []
self.is_local = ((hparams.master == "") and (hparams.tpu_name is None))
self.per_host_v1 = per_host_v1
self.iterations = train_iterations
self.eval_steps = eval_steps
self.outfeed_tensors = []
self.outfeed_names = []
self.dequeue_ops = []
self.predictions = {}
self.sess = None
self.graph = tf.Graph()
self.hparams = hparams
self.num_hosts = hparams.num_shards // hparams.num_shards_per_host
with self.graph.as_default():
self.tpu_init = [tpu.initialize_system()]
self.tpu_shutdown = tpu.shutdown_system()
self.resolver = get_resolver(hparams)
session_config = tf.ConfigProto(
allow_soft_placement=True,
isolate_session_state=True,
operation_timeout_in_ms=600 * 60 * 1000,
graph_options=tf.GraphOptions(
rewrite_options=rewriter_config_pb2.RewriterConfig(
disable_meta_optimizer=True)))
if self.hparams.tpu_name is None:
master = self.hparams.master
else:
cluster_spec = self.resolver.cluster_spec()
if cluster_spec:
session_config.cluster_def.CopyFrom(cluster_spec.as_cluster_def())
master = self.resolver.get_master()
self.sess = tf.Session(master, graph=self.graph, config=session_config)
self.sess.run(self.tpu_init)
def initialize(self, input_fn, eval_input_fn, params):
"""Initialize all the things required for training."""
tf.logging.info("TrainLowLevelRunner: initialize method")
def get_enqueue_ops_fn(host_id):
"""Generate the enqueue ops graph function."""
device = device_for_host(get_host(self.resolver, self.hparams, host_id))
if host_id in range(0, self.hparams.num_infeed_workers * 2, 2):
with tf.device(device):
params["batch_size"] = self.hparams.batch_size
params["dataset_num_shards"] = self.hparams.num_infeed_workers
params["dataset_index"] = host_id // 2
output = input_fn(params)
if not self.hparams.use_synthetic_data:
iterator = output.make_initializable_iterator()
self.dataset_initializer.append(iterator.initializer)
if host_id == 0:
f = iterator.get_next()
self.feature_structure["features"] = {
k: tf.zeros_like(f[k]) for k in f
}
else:
self.feature_structure["features"] = output
self.feature_structure["core_id"] = tf.constant([1], tf.int32)
def enqueue_ops_fn(idx):
"""Enqueue ops function for one host.."""
with tf.device(device):
sharded_inputs = []
start_idx = 0
if host_id in range(0, self.hparams.num_infeed_workers * 2, 2):
core_id = tf.constant(
host_id * self.hparams.num_shards_per_host,
shape=[1],
dtype=tf.int32)
if self.hparams.use_synthetic_data:
features = output
else:
def true_fn():
return iterator.get_next()
def false_fn():
return {
k: tf.zeros_like(self.feature_structure["features"][k])
for k in self.feature_structure["features"]
}
features = tf.cond(
tf.equal(idx % self.hparams.num_infeed_workers, host_id // 2),
true_fn, false_fn)
sharded_inputs.append(
data_nest.flatten({
"features": features,
"core_id": core_id
}))
start_idx = 1
for i in range(start_idx, self.hparams.num_shards_per_host):
sharded_inputs.append(
data_nest.flatten({
"features": {
k: tf.zeros_like(self.feature_structure["features"][k])
for k in self.feature_structure["features"]
},
"core_id":
tf.constant(
host_id * self.hparams.num_shards_per_host + i,
shape=[1],
dtype=tf.int32)
}))
infeed = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(sharded_inputs[0]))
self.infeed_queue.append(infeed)
def tpu_ordinal_fn(shard_index_in_host):
return shard_index_in_host % self.hparams.num_shards_per_host
return infeed.generate_enqueue_ops(
sharded_inputs, tpu_ordinal_function=tpu_ordinal_fn)
return enqueue_ops_fn
def get_eval_enqueue_ops_fn(host_id):
"""Generate the enqueue ops graph function."""
params["dataset_num_shards"] = self.num_hosts
params["dataset_index"] = host_id
with tf.device(
device_for_host(get_host(self.resolver, self.hparams, host_id))):
dataset = eval_input_fn(params)
iterator = dataset.make_initializable_iterator()
self.eval_dataset_initializer.append(iterator.initializer)
def enqueue_ops_fn():
"""Enqueue ops function for one host."""
per_host_sharded_inputs = []
control_deps = []
for _ in range(self.hparams.num_shards_per_host):
with tf.control_dependencies(control_deps):
features = iterator.get_next()
self.eval_feature_structure["features"] = features
flattened_inputs = data_nest.flatten(self.eval_feature_structure)
control_deps.extend(flattened_inputs)
per_host_sharded_inputs.append(flattened_inputs)
infeed = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]))
self.eval_infeed_queue.append(infeed)
def tpu_ordinal_fn(shard_index_in_host):
return shard_index_in_host % self.hparams.num_shards_per_host
return infeed.generate_enqueue_ops(
per_host_sharded_inputs, tpu_ordinal_function=tpu_ordinal_fn)
return enqueue_ops_fn
with self.graph.as_default():
if self.iterations > 0:
for i in range(self.num_hosts):
self.enqueue_ops.append(
wrap_computation_in_while_loop(
get_enqueue_ops_fn(i),
n=self.iterations,
host_name=get_host(self.resolver, self.hparams, i),
include_induction_variable=True))
if self.eval_steps > 0:
for i in range(0, self.num_hosts):
self.eval_enqueue_ops.append(
wrap_computation_in_while_loop(
get_eval_enqueue_ops_fn(i),
n=self.eval_steps,
host_name=get_host(self.resolver, self.hparams, i)))
init_tables = tf.tables_initializer()
self.sess.run(init_tables)
self.sess.run(self.dataset_initializer)
def build_model(self, model_fn, params):
"""Build the TPU model and infeed enqueue ops."""
tf.logging.info("TrainLowLevelRunner: build_model method")
def tpu_train_step(loss):
"""Generate the TPU graph."""
del loss
values = self.infeed_queue[0].generate_dequeue_op(tpu_device=0)
unflattened_inputs = data_nest.pack_sequence_as(self.feature_structure,
values)
features = unflattened_inputs["features"]
core_id = unflattened_inputs["core_id"]
new_features = {}
for k in features:
s = features[k].shape.as_list()
s = [self.hparams.num_shards, s[0] // self.hparams.num_shards] + s[1:]
new_features[k] = tf.squeeze(
tf.gather(
tf.reshape(tpu_ops.cross_replica_sum(features[k]), s), core_id),
[0])
estimator_spec = model_fn(new_features, None, tf.estimator.ModeKeys.TRAIN,
params)
loss, train_op = estimator_spec.loss, estimator_spec.train_op
with tf.control_dependencies([train_op]):
return tf.identity(loss)
@tpu_function.on_device_training_loop
def train_loop():
return training_loop.repeat(self.iterations, tpu_train_step,
[_INITIAL_LOSS])
def tpu_eval_step():
"""Generate the TPU graph."""
values = self.eval_infeed_queue[0].generate_dequeue_op(tpu_device=0)
unflattened_inputs = data_nest.pack_sequence_as(
self.eval_feature_structure, values)
features = unflattened_inputs["features"]
estimator_spec = model_fn(features, None, tf.estimator.ModeKeys.PREDICT,
params)
for k, v in six.iteritems(estimator_spec.predictions):
self.outfeed_names.append(k)
self.outfeed_tensors.append(v)
with tf.device(
device_for_tpu_core(get_host(self.resolver, self.hparams))):
outfeed_enqueue_ops = tpu_ops.outfeed_enqueue_tuple(
self.outfeed_tensors)
with tf.control_dependencies([outfeed_enqueue_ops]):
return tf.no_op()
@tpu_function.on_device_training_loop
def eval_loop():
if self.eval_steps > 0:
return training_loop.repeat(self.eval_steps, tpu_eval_step, [])
else:
return tf.no_op()
def train_eval_step():
with tf.control_dependencies(train_loop()):
return eval_loop()
def train_eval_loop():
return training_loop.repeat(self.hparams.max_train_epochs,
train_eval_step, [])
def create_dequeue_ops(host_id):
"""Create outfeed dequeue ops."""
dequeue_ops = []
tensor_dtypes = []
tensor_shapes = []
for v in self.outfeed_tensors:
dequeue_ops.append([])
tensor_dtypes.append(v.dtype)
tensor_shapes.append(v.shape)
for i in range(self.hparams.num_shards_per_host):
with tf.device(
device_for_host(get_host(self.resolver, self.hparams, host_id))):
outfeed_tensors = tpu_ops.outfeed_dequeue_tuple(
dtypes=tensor_dtypes, shapes=tensor_shapes, device_ordinal=i)
for j, item in enumerate(outfeed_tensors):
dequeue_ops[j].append(item)
for j in range(len(outfeed_tensors)):
dequeue_ops[j] = tf.concat(dequeue_ops[j], axis=0)
return dequeue_ops
with self.graph.as_default():
if self.eval_steps <= 0:
(self.loss,) = tpu.shard(
train_loop,
inputs=[],
num_shards=self.hparams.num_shards,
outputs_from_all_shards=False,
)
else:
(
self.compile_op,
self.train_eval_op,
) = tpu.split_compile_and_shard(
train_eval_loop,
inputs=[],
num_shards=self.hparams.num_shards,
outputs_from_all_shards=False)
if self.eval_steps > 0:
for i in range(0, self.num_hosts):
self.dequeue_ops.append({})
host_dequeue_ops = create_dequeue_ops(i)
for j, dequeue_tenor in enumerate(host_dequeue_ops):
self.dequeue_ops[i][self.outfeed_names[j]] = dequeue_tenor
global_initializer = tf.global_variables_initializer()
local_initializer = tf.local_variables_initializer()
self.sess.run(global_initializer)
self.sess.run(local_initializer)
graph_io.write_graph(
self.graph.as_graph_def(add_shapes=True), self.hparams.out_dir,
"graph.pbtxt")
self.saver = tf.train.Saver()
def train(self,
          start_step,
          train_steps,
          num_threads=2,
          checkpoint_threads=None):
    """Run the Train loop on the TPU device.

    Runs ``train_steps`` steps in chunks of ``self.iterations``, feeding the
    infeed queue from a background thread and saving checkpoints from a small
    round-robin pool of threads after each chunk.

    Args:
        start_step: global step the run starts at; used for logging and for
            naming checkpoint files.
        train_steps: total number of steps to run; must be a multiple of
            ``self.iterations`` (asserted in the infeed thread).
        num_threads: size of the checkpoint thread pool; 0 disables
            checkpointing entirely.
        checkpoint_threads: optional externally managed list of thread slots;
            when None, a pool is created here and joined before returning.
    """
    tf.logging.info("TrainLowLevelRunner: train for %d steps in total",
                    train_steps)

    def infeed_thread_fn(sess, enqueue_ops):
        # Push one set of enqueue ops per training chunk.
        assert train_steps % self.iterations == 0
        steps = train_steps // self.iterations
        for _ in range(steps):
            sess.run([enqueue_ops])

    def checkpoint_thread_fn(saver, sess):
        # NOTE(review): reads `cur_step` from the enclosing scope at call
        # time, so the checkpoint file name reflects the step count at the
        # moment the thread was started.
        saver.save(sess, self.hparams.out_dir + "/model.ckpt-%d" % (start_step + cur_step))

    infeed_thread = threading.Thread(
        target=infeed_thread_fn, args=(self.sess, self.enqueue_ops))
    infeed_thread.start()
    cur_step = 0
    thread_id = 0
    need_join = False
    if checkpoint_threads is None and num_threads > 0:
        # We own the pool: create empty slots and remember to join them.
        checkpoint_threads = []
        need_join = True
        for i in range(num_threads):
            checkpoint_threads.append(None)
    while cur_step < train_steps:
        start = time.time()
        tf.logging.info("TrainLowLevelRunner: start train step:%d",
                        start_step + cur_step)
        cur_step += self.iterations
        loss = self.sess.run([self.loss])
        tf.logging.info("TrainLowLevelRunner: sess run loss: %s", loss)
        if num_threads > 0:
            # Round-robin over the pool; wait for the slot's previous save to
            # finish before reusing it.
            if checkpoint_threads[thread_id] is not None:
                checkpoint_threads[thread_id].join()
            checkpoint_threads[thread_id] = threading.Thread(
                target=checkpoint_thread_fn, args=(self.saver, self.sess))
            checkpoint_threads[thread_id].start()
            thread_id += 1
            if thread_id >= num_threads:
                thread_id = 0
        end = time.time()
        tf.logging.info(
            "TrainLowLevelRunner: step time {} sec {} examples/sec".format(
                end - start,
                self.iterations * self.hparams.batch_size / (end - start)))
    infeed_thread.join()
    if need_join:
        # Only join threads we created ourselves.
        for i in range(num_threads):
            if checkpoint_threads[i] is not None:
                checkpoint_threads[i].join()
                checkpoint_threads[i] = None
def predict(self, checkpoint_path=None):
    """Run the predict loop on the TPU device.

    Generator: yields one dict per example, mapping outfeed names to the
    example's values, for ``eval_steps`` steps across all hosts. One dequeue
    thread per host pulls outfeed results into ``self.predictions``.

    Args:
        checkpoint_path: checkpoint to restore; defaults to the latest one in
            ``self.hparams.out_dir``.
    """
    if not checkpoint_path:
        checkpoint_path = tf.train.latest_checkpoint(self.hparams.out_dir)
    if self.iterations == 0:
        # NOTE(review): restoring only when iterations == 0 looks intentional
        # (train-and-eval mode evaluates the in-memory weights instead) --
        # confirm against the runner's setup.
        self.saver.restore(self.sess, checkpoint_path)
    queue = Queue.Queue()

    def dequeue_thread_fn(sess, dequeue_ops, i):
        # Block for a work token per eval step; _STOP shuts the thread down.
        while True:
            item = queue.get(block=True)
            if item == _STOP:
                return
            self.predictions[i] = sess.run(dequeue_ops)
            queue.task_done()

    dequeue_threads = [None] * self.num_hosts
    for i in range(self.num_hosts):
        dequeue_threads[i] = threading.Thread(
            target=dequeue_thread_fn, args=(self.sess, self.dequeue_ops[i], i))
        dequeue_threads[i].start()
    for step in range(self.eval_steps):
        tf.logging.info("TrainAndEvalLowLevelRunner: start eval step:%d", step)
        # One token per host, then wait until every host has dequeued.
        for i in range(self.num_hosts):
            queue.put(_ITEM)
        queue.join()
        # Fan the per-host batched results out into per-example dicts.
        for j in range(self.num_hosts):
            for i in range(self.hparams.infer_batch_size // self.num_hosts):
                yield {
                    key: value[i] for key, value in six.iteritems(self.predictions[j])
                }
    # Shut down and reap the dequeue threads.
    for i in range(self.num_hosts):
        queue.put(_STOP)
    for i in range(self.num_hosts):
        dequeue_threads[i].join()
def train_and_predict(self):
    """Run the combined train-and-eval loop on the TPU device.

    Starts the fused train/eval TPU op and the infeed on background threads,
    then evaluates after every epoch on this thread, emitting MLPerf log
    markers. Returns ``(score, current_step)`` when evaluation ran, else
    ``(None, None)``.
    """
    self.sess.run([self.compile_op])
    # Train and eval thread.
    def train_eval_thread_fn(sess, train_eval_op):
        tf.logging.info("train_eval_op start")
        sess.run([train_eval_op])
    train_eval_thread = threading.Thread(
        target=train_eval_thread_fn, args=(self.sess, self.train_eval_op))
    train_eval_thread.start()
    # Infeed thread.
    def infeed_thread_fn(sess, train_enqueue_ops, eval_enqueue_ops, eval_init):
        """Start the infeed."""
        # NOTE(review): fixed warm-up delay before declaring init done --
        # presumably tuned to cover TPU program compilation; confirm.
        time.sleep(150)
        mlp_log.mlperf_print("init_stop", None)
        mlp_log.mlperf_print("run_start", None)
        mlp_log.mlperf_print(
            "block_start",
            None,
            metadata={
                "first_epoch_num": 1,
                "epoch_count": 1
            })
        for i in range(self.hparams.max_train_epochs):
            tf.logging.info("Infeed for epoch: %d", i + 1)
            # Re-initialize the eval dataset, then feed train and eval data.
            sess.run(eval_init)
            sess.run([train_enqueue_ops])
            sess.run([eval_enqueue_ops])
    infeed_thread = threading.Thread(
        target=infeed_thread_fn,
        args=(self.sess, self.enqueue_ops, self.eval_enqueue_ops,
              self.eval_dataset_initializer))
    infeed_thread.start()
    if self.eval_steps > 0:
        eval_state = {"run_success": False, "score": 0.0}
        # NOTE(review): `current_step` is only bound inside this loop; if
        # max_train_epochs is 0 the return below would raise NameError --
        # confirm that max_train_epochs >= 1 is guaranteed.
        for epoch in range(self.hparams.max_train_epochs):
            predictions = list(self.predict())
            mlp_log.mlperf_print(
                "eval_start", None, metadata={"epoch_num": epoch + 1})
            current_step = epoch * self.iterations
            eval_state["score"] = metric.get_metric(self.hparams, predictions,
                                                    current_step)
            tf.logging.info("Score after epoch %d: %f", epoch, eval_state["score"])
            mlp_log.mlperf_print(
                "eval_accuracy",
                eval_state["score"] / 100.0,
                metadata={"epoch_num": epoch + 1})
            mlp_log.mlperf_print(
                "eval_stop", None, metadata={"epoch_num": epoch + 1})
            mlp_log.mlperf_print(
                "block_stop",
                None,
                metadata={
                    "first_epoch_num": epoch + 1,
                    "epoch_count": 1
                })
            # Stop early once the target BLEU score is reached.
            if eval_state["score"] >= self.hparams.target_bleu:
                eval_state["run_success"] = True
                mlp_log.mlperf_print("run_stop", None, metadata={"status": "success"})
                break
            mlp_log.mlperf_print(
                "block_start",
                None,
                metadata={
                    "first_epoch_num": epoch + 2,
                    "epoch_count": 1
                })
        if not eval_state["run_success"]:
            mlp_log.mlperf_print("run_stop", None, metadata={"status": "abort"})
    infeed_thread.join()
    train_eval_thread.join()
    if self.eval_steps > 0:
        return eval_state["score"], current_step
    else:
        return None, None
|
from .widget import *

# Proxy for the Objective-C NSObject class; bare instances are used below as
# opaque item handles for the outline view's rows.
NSObject = ObjCClass('NSObject')
class TreeNode(object):
    """One row of a Tree widget.

    Stores one value per column in ``data``, a (initially empty) list of
    child nodes, and a native NSObject handle identifying the row on the
    Objective-C side.
    """

    def __init__(self, *data):
        # Python-side state for this row.
        self.data = data
        self.children = []
        self._tree = None
        # Opaque native handle for the Cocoa outline view.
        self._impl = NSObject.alloc().init()
class TreeDelegate_(object):
    """Objective-C delegate/data-source methods for the outline view.

    ``TreeDelegate`` subclasses NSOutlineView at the ObjC runtime level; each
    method is registered with an Objective-C type-encoding string (return
    type first, then argument types), so those strings must match the
    selector signatures exactly. ``self.interface`` is set to the owning
    Tree widget, whose ``_data`` dict maps id(native node) -> row info (key
    None is the invisible root).
    """
    TreeDelegate = ObjCSubclass('NSOutlineView', 'TreeDelegate')

    @TreeDelegate.method('@@i@')
    def outlineView_child_ofItem_(self, tree, child, item):
        # item is None for the invisible root; otherwise look up the node by
        # the id() key under which Tree.insert registered it.
        if item is None:
            key = None
        else:
            key = id(item)
        node_id = self.interface._data[key]['children'][child]
        node = self.interface._data[node_id]['node']
        return node

    @TreeDelegate.method('B@@')
    def outlineView_isItemExpandable_(self, tree, item):
        # Expandable only when a children list exists (leaves store None).
        if item is None:
            key = None
        else:
            key = id(item)
        return self.interface._data[key]['children'] is not None

    @TreeDelegate.method('i@@')
    def outlineView_numberOfChildrenOfItem_(self, tree, item):
        if item is None:
            key = None
        else:
            key = id(item)
        try:
            return len(self.interface._data[key]['children'])
        except TypeError:
            # children is None for leaf nodes: report zero children.
            return 0

    @TreeDelegate.method('i@@@')
    def outlineView_objectValueForTableColumn_byItem_(self, tree, column, item):
        # NOTE(review): this returns an NSString while the encoding declares
        # an int ('i') return -- presumably should be '@@@@'; confirm against
        # the ObjC bridge in use before changing.
        column_index = int(cfstring_to_string(column.identifier))
        return get_NSString(str(self.interface._data[id(item)]['data'][column_index]))

    @TreeDelegate.method('v@')
    def outlineViewSelectionDidChange_(self, notification):
        # Placeholder selection handler.
        print ("tree selection changed")
# Fetch the runtime class registered by TreeDelegate_ above so it can be
# instantiated like any other ObjC class.
TreeDelegate = ObjCClass('TreeDelegate')
class Tree(Widget):
    """Tree (outline) widget backed by an NSOutlineView inside an NSScrollView.

    Node data lives in ``self._data``: a dict keyed by id(native node) --
    with key None for the invisible root -- whose values hold the native
    node, the per-column data tuple, and the list of child ids (None for
    leaf nodes).
    """

    def __init__(self, headings):
        super(Tree, self).__init__()
        # Column header titles; one NSTableColumn is created per heading.
        self.headings = headings
        self._tree = None
        self._columns = None
        self._data = {
            None: {
                'children': []
            }
        }
        self.startup()

    def startup(self):
        """Build the native scroll view + outline view and wire the delegate."""
        self._impl = NSScrollView.alloc().init()
        self._impl.setHasVerticalScroller_(True)
        self._impl.setHasHorizontalScroller_(True)
        self._impl.setAutohidesScrollers_(False)
        self._impl.setBorderType_(NSBezelBorder)
        self._impl.setTranslatesAutoresizingMaskIntoConstraints_(False)
        # TreeDelegate subclasses NSOutlineView, so the delegate instance IS
        # the outline view; below it also serves as its own delegate and
        # data source.
        self._tree = TreeDelegate.alloc().init()
        self._tree.interface = self
        self._tree.setColumnAutoresizingStyle_(NSTableViewUniformColumnAutoresizingStyle)
        # One column per heading, identified by its index as a string.
        self._columns = [
            NSTableColumn.alloc().initWithIdentifier_(get_NSString('%d' % i))
            for i, heading in enumerate(self.headings)
        ]
        for heading, column in zip(self.headings, self._columns):
            self._tree.addTableColumn_(column)
            cell = column.dataCell()
            cell.setEditable_(False)
            cell.setSelectable_(False)
            column.valueForKey_(get_NSString('headerCell')).setStringValue_(get_NSString(heading))
        # The first column carries the expansion arrows.
        self._tree.setOutlineTableColumn_(self._columns[0])
        self._tree.setDelegate_(self._tree)
        self._tree.setDataSource_(self._tree)
        self._impl.setDocumentView_(self._tree)

    def reload_data(self):
        """Ask the native view to re-query its data source."""
        self._tree.reloadData()

    def insert(self, parent, index, *data):
        """Insert a node under ``parent`` (a node id, or None for the root).

        ``index`` of None appends. Returns the new node's id, which callers
        use as the parent handle for further inserts.
        """
        if len(data) != len(self.headings):
            raise Exception('Data size does not match number of headings')
        # A bare NSObject serves as the opaque native item for this row.
        node = NSObject.alloc().init()
        parent_node = self._data[parent]
        if parent_node['children'] is None:
            # Parent was a leaf until now; give it a child list.
            parent_node['children'] = []
        if index is None:
            parent_node['children'].append(id(node))
        else:
            parent_node['children'].insert(index, id(node))
        self._data[id(node)] = {
            'node': node,
            'data': data,
            'children': None,
        }
        self.reload_data()
        return id(node)
|
# -*- coding: utf-8 -*-
import abc
class Fishing(object, metaclass=abc.ABCMeta):
    """Template-method base class for fishing.

    ``finishing`` is the template method: it fixes the order of the three
    preparation steps, which concrete subclasses must implement.

    Fix: the original declared ``__metaclass__ = abc.ABCMeta``, which is
    Python-2-only syntax and is silently ignored on Python 3, so the
    @abstractmethod decorators were never enforced. Using the Python 3
    ``metaclass=`` keyword restores the intended behavior.
    """

    def finishing(self):
        """Go fishing: run the fixed sequence of steps, then start fishing."""
        self.prepare_bait()
        self.go_to_riverbank()
        self.find_location()
        print("start fishing")

    @abc.abstractmethod
    def prepare_bait(self):
        """Obtain bait (subclass-specific)."""
        pass

    @abc.abstractmethod
    def go_to_riverbank(self):
        """Travel to the water (subclass-specific)."""
        pass

    @abc.abstractmethod
    def find_location(self):
        """Pick the exact spot to fish from (subclass-specific)."""
        pass
class JohnFishing(Fishing):
    """John's concrete fishing workflow.

    John also wants to fish, so he must implement the three fishing steps.
    """

    def prepare_bait(self):
        """Buy bait from Taobao."""
        print("John: buy bait from Taobao")

    def go_to_riverbank(self):
        """Drive to the river."""
        print("John: to river by driving")

    def find_location(self):
        """Pick a fishing spot on the island."""
        print("John: select location on the island")
class SimonFishing(Fishing):
    """Simon's concrete fishing workflow.

    Simon also wants to fish, so he too must implement the three fishing steps.
    """

    def prepare_bait(self):
        """Buy bait from JD."""
        print("Simon: buy bait from JD")

    def go_to_riverbank(self):
        """Bike to the river."""
        print("Simon: to river by biking")

    def find_location(self):
        """Pick a fishing spot on the riverbank."""
        print("Simon: select location on the riverbank")
if __name__ == '__main__':
    # Demo: each fisher runs the same fixed template method in turn.
    for fisher in (JohnFishing, SimonFishing):
        f = fisher()
        f.finishing()
# The template method pattern is the structurally simplest behavioural design
# pattern: its structure contains nothing but the inheritance relationship
# between a parent class and its subclasses. It wraps the steps of a complex
# procedure in a set of primitive methods, and the abstract parent provides a
# "template method" that fixes the order in which those primitive methods run;
# subclasses override selected steps, so the same algorithm skeleton can yield
# different results. The parent defines the framework; concrete steps are
# completed in the subclasses.
|
import os
import sys
import yaml
import json
from config import Config
from devita.sfo import sfo
class BackendClient:
    """Discovers installed PS3 (RPCS3) games and computes library state changes.

    NOTE(review): ``LocalGame`` / ``LocalGameState`` (used in
    get_state_changes) are not imported in the visible header -- presumably
    provided elsewhere in the file by the GOG Galaxy plugin API; confirm.
    """

    def __init__(self, config):
        # Config provides games_yml, game_directory and joinpath().
        self.config = config

    # Returns an array of Title ID, Title pairs.
    def get_games(self):
        """Scan games.yml paths plus the games directory for installed games.

        Returns a list of ``[title_id, title]`` string pairs read from each
        game's PARAM.SFO.
        """
        # May still be useful if more info on a game is needed.
        # url = 'https://rpcs3.net/compatibility?api=v1&g='
        results = []
        game_paths = []
        # games.yml maps game ids to install paths; collect every known path.
        with open(self.config.games_yml) as games_file:
            games_yml = yaml.load(games_file, Loader=yaml.SafeLoader)
        if games_yml:
            for game in games_yml:
                game_paths.append(games_yml[game])
        # Also scan the configured games directory.
        game_paths.append(self.config.game_directory)
        # This is probably crazy inefficient.
        for search_dir in game_paths:
            for root, dirs, files in os.walk(search_dir):
                for file in files:
                    if file == 'EBOOT.BIN':
                        # Search for EBOOT.BIN to find actual games,
                        # then PARAM.SFO is one level up from EBOOT.BIN.
                        bin_path = self.config.joinpath(root, file)
                        sfo_path = bin_path.replace(
                            os.path.join('USRDIR', 'EBOOT.BIN'),
                            'PARAM.SFO')
                        # PARAM.SFO is read as a binary file,
                        # so all keys must also be in binary.
                        param_sfo = sfo(sfo_path)
                        title_id = param_sfo.params[bytes('TITLE_ID', 'utf-8')]
                        title = param_sfo.params[bytes('TITLE', 'utf-8')]
                        # Convert results to strings before return.
                        results.append([
                            str(title_id, 'utf-8'),
                            str(title, 'utf-8')])
        return results

    def get_game_path(self, game_id):
        """Return the install path for ``game_id``.

        Looks the id up in games.yml first, then falls back to scanning the
        games directory for a matching PARAM.SFO. Raises FileNotFoundError
        when the game cannot be located anywhere.
        """
        with open(self.config.games_yml) as games_file:
            games_yml = yaml.load(games_file, Loader=yaml.SafeLoader)
        if games_yml:
            try:
                game_path = games_yml[game_id]
                game_path = self.config.joinpath(game_path, 'PS3_GAME')
                return game_path
            # If the game is not found in games.yml, we will search config.game_path.
            except KeyError:
                pass
        for folder in os.listdir(self.config.game_directory):
            check_path = self.config.joinpath(self.config.game_directory, folder)
            # Check that PARAM.SFO exists before loading it.
            sfo_path = self.config.joinpath(check_path, 'PARAM.SFO')
            if os.path.exists(sfo_path):
                param_sfo = sfo(sfo_path)
                # If PARAM.SFO contains game_id, we found the right path.
                if bytes(game_id, 'utf-8') in param_sfo.params[bytes('TITLE_ID', 'utf-8')]:
                    return check_path
        # If we manage to get here, we should really raise an exception.
        raise FileNotFoundError

    def get_state_changes(self, old_list, new_list):
        """Diff two LocalGame lists into removed / added / state-changed games."""
        old_dict = {x.game_id: x.local_game_state for x in old_list}
        new_dict = {x.game_id: x.local_game_state for x in new_list}
        result = []
        # removed games
        # NOTE(review): `id` shadows the builtin inside these genexps --
        # harmless locally, but worth renaming if ever touched.
        result.extend(LocalGame(id, LocalGameState.None_) for id in old_dict.keys() - new_dict.keys())
        # added games
        result.extend(local_game for local_game in new_list if local_game.game_id in new_dict.keys() - old_dict.keys())
        # state changed
        result.extend(LocalGame(id, new_dict[id]) for id in new_dict.keys() & old_dict.keys() if new_dict[id] != old_dict[id])
        return result
"""
"""
__all__ = [
'FlagField', 'BoolField'
]
import ctypes
from calpack.models.fields.Fields import Field
class FlagField(Field):
    """A custom field for handling single bit 'flags'.

    Backed by one bit of an unsigned 8-bit integer and exposed to Python as
    a plain ``bool``.

    :param bool default_val: the default value of the field (default False)
    """
    c_type = ctypes.c_uint8

    def __init__(self, default_val=False):
        super(FlagField, self).__init__(default_val)

    def c_to_py(self, c_field):
        # Any non-zero bit pattern reads back as True.
        return bool(c_field)

    def py_to_c(self, val):
        # Accept a bool or another FlagField; reject everything else.
        if not isinstance(val, (FlagField, bool)):
            raise TypeError("Must be of type `FlagField` or `bool`")
        return int(val)

    def create_field_c_tuple(self):
        # The trailing 1 makes this a one-bit bitfield in the ctypes Structure.
        return (self.field_name, self.c_type, 1)
class BoolField(Field):
    """A custom field for handling Boolean types."""
    c_type = ctypes.c_bool

    def __init__(self, default_val=False):
        super(BoolField, self).__init__(default_val)
        # NOTE(review): ctypes.sizeof() is measured in bytes although the
        # attribute is named bit_len -- confirm how Field consumes bit_len
        # before changing.
        self.bit_len = ctypes.sizeof(self.c_type)

    def py_to_c(self, val):
        # Reject anything that is not a bool or another BoolField.
        if not isinstance(val, (bool, BoolField)):
            raise TypeError("Must be of type `bool` or `BoolField`!")
        return super(BoolField, self).py_to_c(val)
|
from torchexpo import modules
from torchexpo import vision
from torchexpo import nlp
from torchexpo.version import __version__ |
/home/runner/.cache/pip/pool/ae/5b/5d/e981f613e78d042ef2f334c0f8f444f7d65d3d8ceb407effafc2e1c1a4 |
import os
from typing import Union, Any, Optional, Iterable, Dict, TYPE_CHECKING, Tuple, cast
if TYPE_CHECKING:
from pyfileconf.main import PipelineManager
from mixins.repr import ReprMixin
from pyfileconf.basemodels.config import ConfigBase
from pyfileconf.config.models.file import ActiveFunctionConfigFile
from pyfileconf.exceptions.config import ConfigManagerNotLoadedException, CannotResolveConfigDependenciesException
from pyfileconf.logic.get import _get_from_nested_obj_by_section_path
from pyfileconf.logic.set import _set_in_nested_obj_by_section_path
from pyfileconf.config.models.interfaces import ConfigSectionOrConfig
from pyfileconf.config.models.section import ConfigSection, ActiveFunctionConfig
from pyfileconf.plugin import manager
from pyfileconf.sectionpath.sectionpath import SectionPath
class ConfigManager(ReprMixin):
    """Loads and manages the nested configuration tree for one PipelineManager.

    The tree mirrors the directory layout under ``basepath``; attribute
    access on the manager falls through to the loaded main section. A
    file-less ``local_config`` is always applied last as the
    highest-priority override.

    Fixes relative to the original: the ``_is_function_or_pipeline_path``
    error message interpolated the object twice instead of showing its type
    (now consistent with ``_get_default_func_or_section_config``); the
    side-effect-only list comprehension in ``get`` is a plain loop; and
    defaults of ``None`` are annotated ``Optional[...]``.
    """
    repr_cols = ['basepath', 'section']

    def __init__(self, basepath: str, pipeline_manager_name: str, main_section: Optional[ConfigSection] = None):
        self.section = main_section
        self.basepath = basepath
        self.pipeline_manager_name = pipeline_manager_name
        # Highest-priority override config, not backed by any file
        self.local_config = ActiveFunctionConfig()

    def __getattr__(self, item):
        # Delegate unknown attribute lookups to the loaded section tree
        return getattr(self.section, item)

    def __dir__(self):
        exposed_methods = [
            'load',
            'get',
            'update'
        ]
        exposed_attrs = [
            'basepath',
            'pipeline_manager_name',
        ]
        return exposed_methods + exposed_attrs + list(self.section.config_map.keys())

    def load(self):
        """(Re)load the entire config tree from the files under basepath."""
        self.section = ConfigSection.from_files(self.basepath)

    def update(
        self, d_: Optional[dict] = None, section_path_str: Optional[str] = None,
        pyfileconf_persist: bool = True, **kwargs
    ) -> Tuple[ConfigBase, bool]:
        """
        Update an existing config in place with the passed items.

        :param d_: dict of updates to apply
        :param section_path_str: dotted path to the config; None targets the local config
        :param pyfileconf_persist: whether the update should be persisted
        :param kwargs: additional updates to apply
        :return: new config, whether config was updated
        """
        config_obj = self._get_project_config_or_local_config_by_section_path(section_path_str)
        if config_obj is None:
            raise ConfigManagerNotLoadedException('no config to update')
        would_update = self._determine_and_track_if_config_would_be_updated(
            config_obj, section_path_str, d_, **kwargs
        )
        if would_update:
            config_obj.update(d_, pyfileconf_persist=pyfileconf_persist, **kwargs)
        return config_obj, would_update

    def refresh(self, section_path_str: str) -> Tuple[ConfigBase, bool, Dict[str, Any]]:
        """Re-read a config from its file.

        :return: the config, whether it changed, and the dict of changes
        """
        config_obj = self._get_project_config_or_local_config_by_section_path(section_path_str)
        if config_obj is None:
            raise ConfigManagerNotLoadedException('no config to refresh')
        would_refresh = self._determine_and_track_if_config_would_be_refreshed(config_obj, section_path_str)
        if would_refresh:
            updates = config_obj.refresh()
        else:
            updates = {}
        return config_obj, would_refresh, updates

    def refresh_dependent_configs(self, section_path_str: str):
        """Refresh every config registered as depending on the given config.

        Keeps refreshing as new dependencies get registered by the refreshes
        themselves, until the dependency set stops growing.
        """
        from pyfileconf import context
        full_sp = SectionPath.join(self.pipeline_manager_name, section_path_str)
        update_deps = {*context.force_update_dependencies[full_sp.path_str]}
        all_updated_deps = set()
        while update_deps:
            _refresh_configs(update_deps)
            all_updated_deps.update(update_deps)
            # Get any newly updated dependencies caused by process of updating dependencies
            new_update_deps = context.force_update_dependencies[full_sp.path_str].difference(all_updated_deps)
            if update_deps == new_update_deps:
                # Not expected, but somehow got stuck in an infinite loop where it is
                # always trying to update the same dependency
                raise CannotResolveConfigDependenciesException(update_deps)
            update_deps = new_update_deps

    def reset(self, section_path_str: Optional[str] = None, allow_create: bool = False) -> Tuple[ConfigBase, bool]:
        """
        Resets a function or section config to default. If no section_path_str is passed, resets local config.
        To reset all configs, use .load() instead.

        :return: the default configuration
        """
        default = self._get_default_func_or_section_config(section_path_str, create=allow_create)
        new_config, updated = self.set(section_path_str, default, allow_create=allow_create)
        return new_config, updated

    def pop(self, key: str, section_path_str: Optional[str] = None) -> Any:
        """Remove and return ``key`` from the targeted config (local config when no path given)."""
        config_obj = self._get_project_config_or_local_config_by_section_path(section_path_str)
        if config_obj is None:
            raise ConfigManagerNotLoadedException('no config to pop')
        return config_obj.pop(key)

    def get(self, section_path_str: str) -> Optional[ActiveFunctionConfig]:
        """
        Handles config inheritance to get the active config for a section or function.

        Overrides are applied from lowest to highest priority: function base
        config, project config, then each section from highest to lowest
        level, and finally the local config.

        Args:
            section_path_str: dotted path to the function or section

        Returns:
            the merged active config, or None when no config exists
        """
        config = self._get_func_or_section_configs(section_path_str)

        if self.section is None:
            raise ConfigManagerNotLoadedException('call .load() on ConfigManager before .get()')

        # First override for function defaults is global project config
        section_configs = [self.section.config]

        # Get configs, in order of highest level to lowest level. Will go from project to highest section,
        # down to lowest section.
        section_path = SectionPath(section_path_str)
        full_section = ''
        for section in section_path[:-1]:  # skip the last section or function for special handling at end
            full_section += section  # rebuilding full section path str
            section_configs.append(
                self._get_func_or_section_configs(full_section)
            )
            full_section += '.'

        # Last item of section_path may be another section, or the function/Pipeline itself. If it's a section,
        # must add config for override, but if is function, it is already the base config so should not update.
        full_section += section_path[-1]
        if not self._is_function_or_pipeline_path(full_section):
            # if is a section, not function/pipeline
            section_configs.append(self._get_func_or_section_configs(full_section))

        if config:
            # Override configs. Default config is base config, then gets updated by project, then high
            # level sections to low level sections
            for section_config in section_configs:
                config.update(section_config)
            # Last, override with local config
            config.update(self.local_config)

        return config

    def set(self, section_path_str: Optional[str] = None, value: Optional[ConfigBase] = None,
            allow_create: bool = True) -> Tuple[ConfigBase, bool]:
        """
        In contrast to update, completely replaces the config object.

        :param section_path_str: dotted path to the config; None targets the local config
        :param value: the replacement config (a blank config when None)
        :param allow_create: whether missing sections may be created
        :return: new config, whether config was updated
        """
        if value is None:  # empty config
            value = ActiveFunctionConfig()

        if section_path_str is None:
            # updating local config
            value = cast(ActiveFunctionConfig, value)
            self.local_config = value
            return value, True

        try:
            current_config = self._get_project_config_or_local_config_by_section_path(section_path_str)
        except KeyError:
            # This is a new config, will always update
            new_config = True
        else:
            new_config = current_config is None

        if new_config:
            self._set_func_or_section_config(section_path_str, value=value, allow_create=allow_create)
            return value, True

        assert current_config is not None  # should never fail this, for mypy

        # Not a new config, need to actually determine whether would be updated
        would_update = self._determine_and_track_if_config_would_be_updated(current_config, section_path_str, **value)
        if would_update:
            self._set_func_or_section_config(section_path_str, value=value, allow_create=allow_create)
        return value, would_update

    def _get_func_or_section_configs(self, section_path_str: Optional[str]) -> Optional[ActiveFunctionConfig]:
        """
        This get method is used to get only the config for the section path, without handling
        multiple levels of config and overriding. To get the active config for a function,
        use regular get method.

        Args:
            section_path_str: dotted path; None means the main section

        Returns:
            the config stored at that path, loaded from file if needed
        """
        if self.section is None:
            raise ConfigManagerNotLoadedException('call .load() on ConfigManager before .get()')
        if section_path_str is None:
            section_path_str = self.section.name
        section_path = SectionPath(section_path_str)

        # Goes into nested sections, until it pulls the final config or section
        config_or_section: ConfigSectionOrConfig = _get_from_nested_obj_by_section_path(self, section_path)
        conf = _get_config_from_config_or_section(config_or_section)

        # Now update stored config as loading may have happened during _get_config_from_config_or_section
        # Want to keep the active config once it is loaded
        # But if it is a section, then don't want to overwrite with config
        if not isinstance(config_or_section, ConfigSection):
            _set_in_nested_obj_by_section_path(self, section_path, conf)

        return conf

    def _set_func_or_section_config(self, section_path_str: Optional[str], value=None,
                                    allow_create: bool = True) -> None:
        # Route to the creating or non-creating setter depending on allow_create.
        if self.section is None:
            raise ConfigManagerNotLoadedException('call .load() on ConfigManager before .set()')
        if section_path_str is None:
            section_path_str = self.section.name
        section_path = SectionPath(section_path_str)
        if allow_create:
            self._set_func_or_config_with_create(section_path, value)
        else:
            self._set_func_or_config_no_create(section_path, value)

    def _set_func_or_config_with_create(self, section_path: SectionPath, value: Any):
        # Walk down the section tree, creating missing intermediate sections.
        obj = self
        section_basepath = self.basepath
        for i, section in enumerate(section_path[:-1]):
            section_basepath = os.path.join(section_basepath, section)
            try:
                obj = getattr(obj, section)
            except KeyError as e:
                # Missing intermediate section: create it, then descend into it
                new_section = ConfigSection([], name=section)
                obj.append(new_section)
                obj = getattr(obj, section)

        # Now have collection object which should hold this final object
        obj.append(value)

    def _set_func_or_config_no_create(self, section_path: SectionPath, value: Any):
        # Replace an existing config in place; raises when the path does not exist.
        _set_in_nested_obj_by_section_path(self, section_path, value)

    def _get_default_func_or_section_config(self, section_path_str: Optional[str] = None,
                                            create: bool = False) -> Union[ActiveFunctionConfig, ConfigSection]:
        # Load the on-disk default for the targeted config (blank for local config).
        if section_path_str is None:
            # local config. Default is blank config
            return ActiveFunctionConfig()
        else:
            # otherwise, load from file for default
            section_path = SectionPath(section_path_str)
            filepath = section_path.to_filepath(self.basepath)
            try:
                config_obj = _get_from_nested_obj_by_section_path(self, section_path)
            except KeyError as e:
                # config object not already created
                if not create:
                    raise e
                config_obj = ActiveFunctionConfig()
            if isinstance(config_obj, ConfigSection):
                return ConfigSection.from_files(filepath)
            if isinstance(config_obj, (ActiveFunctionConfig, ActiveFunctionConfigFile)):
                return ActiveFunctionConfig.from_file(filepath + '.py')
            else:
                raise ValueError(f'expected section path to return ConfigSection or FunctionConfig, '
                                 f'got {config_obj} of type {type(config_obj)}')

    def _is_function_or_pipeline_path(self, section_path_str: str) -> bool:
        # True when the path resolves to an individual function/pipeline
        # config, False when it resolves to a section.
        section_path = SectionPath(section_path_str)
        # Goes into nested sections, until it pulls the final config or section
        config_or_section: ConfigSectionOrConfig = _get_from_nested_obj_by_section_path(self, section_path)
        if isinstance(config_or_section, ConfigSection):
            # must be section, not individual pipeline or function
            return False
        elif isinstance(config_or_section, (ActiveFunctionConfig, ActiveFunctionConfigFile)):
            # must be individual function as Config is returned
            return True
        else:
            # Fixed: the message previously interpolated the object twice
            # instead of showing its type.
            raise ValueError(f'expected Config or ConfigSection, got {config_or_section} '
                             f'of type {type(config_or_section)}')

    def _get_project_config_or_local_config_by_section_path(self, section_path_str: Optional[str]
                                                            ) -> Optional[ActiveFunctionConfig]:
        if section_path_str is not None:
            config_obj = self._get_func_or_section_configs(section_path_str)
        else:
            # If no section passed, update local config
            config_obj = self.local_config
        return config_obj

    def _track_pre_update(self, config: ConfigBase, section_path_str: str, all_updates: Dict[str, Any]):
        # Fire the pre-change plugin hook with the original config.
        manager.plm.hook.pyfileconf_pre_config_changed(
            manager=self, orig_config=config, updates=all_updates, section_path_str=section_path_str
        )

    def track_post_update(self, config_: ConfigBase, section_path_str: str, d: Optional[dict] = None, **updates):
        # Fire the post-change plugin hook with the new config.
        if d is None:
            d = {}
        all_updates = {**d, **updates}
        manager.plm.hook.pyfileconf_post_config_changed(
            manager=self, new_config=config_, updates=all_updates, section_path_str=section_path_str
        )

    def _determine_and_track_if_config_would_be_updated(self, config_: ConfigBase,
                                                        section_path_str: Optional[str] = None,
                                                        d_: Optional[dict] = None, **updates) -> bool:
        # Check whether applying the updates would change the config, firing
        # the pre-update hook when it would.
        if d_ is None:
            d_ = {}
        all_updates = {**d_, **updates}
        would_update = config_.would_update(all_updates)
        if would_update and section_path_str is not None:
            self._track_pre_update(config_, section_path_str, all_updates)
        return would_update

    def _determine_and_track_if_config_would_be_refreshed(self, config: ConfigBase, section_path_str: str) -> bool:
        # Check whether a refresh would change the config, firing the
        # pre-update hook when it would.
        updates = config.change_from_refresh()
        if updates:
            self._track_pre_update(config, section_path_str, updates)
        return updates != {}

    @property
    def pipeline_manager(self) -> 'PipelineManager':
        """The PipelineManager instance that owns this ConfigManager."""
        from pyfileconf.main import PipelineManager
        return PipelineManager.get_manager_by_section_path_str(self.pipeline_manager_name)
def _get_config_from_config_or_section(config_or_section: ConfigSectionOrConfig) -> Optional[ActiveFunctionConfig]:
    """Normalize a config, section, or config file into an active config.

    Fix: the ValueError message previously interpolated the offending object
    twice ('of type {config_or_section}') instead of showing its actual type,
    which made the error useless for debugging; it now matches the correct
    sibling message in ``_get_default_func_or_section_config``.

    :param config_or_section: loaded config, section, or not-yet-loaded config file
    :return: the active config (loading it from file if necessary)
    :raises ValueError: when given an object of an unexpected type
    """
    # Pull Config file from ConfigSection
    if isinstance(config_or_section, ActiveFunctionConfig):
        # config already loaded
        return config_or_section
    if isinstance(config_or_section, ConfigSection):
        # a section carries its own already-loaded config
        return config_or_section.config
    if isinstance(config_or_section, ActiveFunctionConfigFile):
        # Load config from file
        return config_or_section.load(ActiveFunctionConfig)
    raise ValueError(f'expected Config or ConfigSection, got {config_or_section} '
                     f'of type {type(config_or_section)}')
def _refresh_configs(section_paths: Iterable[SectionPath]):
    """Refresh the config at each section path via its owning PipelineManager.

    The first segment of each path names the manager; the remaining segments
    form the path of the config within that manager.
    """
    from pyfileconf import PipelineManager  # local import to avoid a cycle

    for section_path in section_paths:
        # Renamed from `manager` to avoid shadowing the module-level import.
        pipeline_manager = PipelineManager.get_manager_by_section_path_str(section_path.path_str)
        relative_path = SectionPath('.'.join(section_path[1:]))
        pipeline_manager.refresh(relative_path.path_str)
# Azure Cognitive Services Speech SDK: one-shot speech recognition example.
import azure.cognitiveservices.speech as speechsdk

# Credentials for the Speech API resource (fill in before running).
clave_suscripcion = "Aquí va la clave"
ubicacion = "Aquí la va región-ubicación"

# Configure access to the resource.
speech_config = speechsdk.SpeechConfig(subscription=clave_suscripcion, region=ubicacion)

# Language used for speech recognition.
idioma = speechsdk.languageconfig.SourceLanguageConfig(language="es-MX")

# Build the recognizer from the configuration above.
speech_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config,
                                               source_language_config=idioma)

print("Habla en 3, 2, 1, ahora =>")

# Capture a single utterance (up to ~15 seconds, or until silence).
resultado = speech_recognizer.recognize_once()

# Report the outcome: recognized text, no match, or cancellation details.
if resultado.reason == speechsdk.ResultReason.RecognizedSpeech:
    print("Lo que dijiste :D: {}".format(resultado.text))
elif resultado.reason == speechsdk.ResultReason.NoMatch:
    print("No reconocí nada: {}".format(resultado.no_match_details))
# Fix: was `speechsdk.ResultadoReason.Canceled` (AttributeError at runtime).
elif resultado.reason == speechsdk.ResultReason.Canceled:
    # Fix: was `result.cancellation_details` -- `result` is undefined here;
    # the recognition result variable is `resultado`.
    cancellation_details = resultado.cancellation_details
    print("Reconocimiento de voz cancelado: {}".format(cancellation_details.reason))
    if cancellation_details.reason == speechsdk.CancellationReason.Error:
        print("Detalles del error: {}".format(cancellation_details.error_details))
import unittest
from interpreter import instructions
from interpreter.memory import Memory
from interpreter.registers import Registers
class TestLwl(unittest.TestCase):
    """Tests the lwl (load word left) instruction at each of the four byte alignments."""

    def _run_lwl(self, address, expected):
        """Run lwl from `address` against a fresh machine state and check $t0.

        Memory holds 0x12345678 at 0x10010000 and $t0 starts as 0x2468abcd,
        so `expected` reflects how many left bytes get replaced.
        """
        reg = Registers(False)
        mem = Memory(False)
        mem.addWord(0x12345678, 0x10010000)
        reg.set_register('$t0', 0x2468abcd)
        reg.set_register("$t1", address)
        instructions.lwl('$t0', "$t1", 0, mem, reg)
        self.assertEqual(expected, reg.get_register('$t0'))

    def test_lwl_0(self):
        '''
        Tests lwl instruction sets the left bits of the register with the provided word
        '''
        self._run_lwl(0x10010000, 0x7868abcd)

    def test_lwl_1(self):
        '''
        Tests lwl instruction sets the left bits of the register with the provided word
        '''
        self._run_lwl(0x10010001, 0x5678abcd)

    def test_lwl_2(self):
        '''
        Tests lwl instruction sets the left bits of the register with the provided word
        '''
        self._run_lwl(0x10010002, 0x345678cd)

    def test_lwl_3(self):
        '''
        Tests lwl instruction sets the left bits of the register with the provided word
        '''
        self._run_lwl(0x10010003, 0x12345678)
from PIL import Image, ImageDraw, ImageFont
from PyQt5.QtWidgets import *
import sys
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import QApplication, QPushButton, QWidget, QLabel
from PyQt5.QtGui import QIcon, QPixmap
import face_recognition
# Image for opening
# NOTE(review): hard-coded absolute path to one user's machine -- presumably
# meant to be replaced or made configurable.
nameImage = "C:\\Users\\Nika Kim\\Desktop\\faces\\K.jpg"
# Load the png file into a numpy array
image = face_recognition.load_image_file(nameImage)
# Find all facial features in all the faces in the image
face_landmarks_list = face_recognition.face_landmarks(image)
# PIL image used later for drawing annotation lines over the photo
pil_image = Image.fromarray(image)
def find_red_pixel(image_name):
    """Return the set of (x, y) coordinates of pure-red pixels.

    A pixel matches only when it is exactly (255, 0, 0) — the marker
    colour the user paints on the forehead.
    """
    target = (255, 0, 0)
    rgb = Image.open(image_name).convert('RGB')
    width, height = rgb.size
    return {(x, y)
            for x in range(width)
            for y in range(height)
            if rgb.getpixel((x, y)) == target}
def analys(imageForAnalys):
    """Measure facial proportions in *imageForAnalys* and print derived
    emotion scores, drawing the measured segments onto the module-level
    ``pil_image``.

    Relies on module-level globals: ``face_landmarks`` (bound by the loop
    at the bottom of the file), ``pil_image`` and ``nameImage``.
    Requires at least one pure-red (255, 0, 0) marker pixel on the
    forehead; prints an error and returns early otherwise.
    """
    # Find red point
    red_pixels = find_red_pixel(imageForAnalys)
    if len(red_pixels) == 0:
        print("ERROR: PUT RED POINT IN THE FOREHEAD")
        # Fix: bail out here. Previously execution continued and crashed
        # with KeyError on red_pixels.pop() below.
        return
    redPoint = red_pixels.pop()
    # Calculate main items
    betweenEyes = face_landmarks['right_eye'][0][0] - face_landmarks['left_eye'][3][0]
    leftEyeLength = face_landmarks['left_eye'][3][0] - face_landmarks['left_eye'][0][0]
    rightEyeLength = face_landmarks['right_eye'][3][0] - face_landmarks['right_eye'][0][0]
    eyeLength = 1.5 * (rightEyeLength + leftEyeLength) / 2
    noseLength = face_landmarks['nose'][4][1] - face_landmarks['right_eye'][0][1]
    # currently unused; kept for future diagnostics
    faceLengthWithoutForehead = face_landmarks['border'][1][1] - face_landmarks['border'][0][1]
    fromChinToLips = face_landmarks['border'][1][1] - face_landmarks['lips_middle'][0][1]
    meanEyebrowBegining = (face_landmarks['left_eyebrow'][4][1] + face_landmarks['right_eyebrow'][0][1])/2
    forehead = meanEyebrowBegining - redPoint[1]
    # Fix: reuse the already-popped redPoint. The original popped a
    # *second* pixel here, which raised KeyError when exactly one red
    # pixel was marked, and otherwise measured from a different pixel
    # than the one used for `forehead`.
    allFace = face_landmarks['chin'][8][1] - redPoint[1]
    cheekbones = face_landmarks['cheekbones'][1][0] - face_landmarks['cheekbones'][0][0]
    yPupil = (face_landmarks['right_eye'][5][1] + face_landmarks['right_eye'][1][1])/2
    # Values
    pui = (betweenEyes + eyeLength)/cheekbones
    puti = (yPupil - redPoint[1])/allFace
    nsi = noseLength/allFace
    stoi = fromChinToLips/allFace
    # Show lines of values on face
    d = ImageDraw.Draw(pil_image, 'RGBA')
    im = Image.open(nameImage)
    (width, height) = im.size
    # Scale line/marker sizes with the image resolution.
    if (width < 600 or height < 600):
        lineWidth = 2
        ellipsRad = 1
    elif (width < 1000 or height < 1000):
        lineWidth = 5
        ellipsRad = 5
    else:
        lineWidth = 8
        ellipsRad = 10
    # Eyes
    d.line([(face_landmarks['left_eye'][3][0]-leftEyeLength/2, face_landmarks['left_eye'][3][1]),
            (face_landmarks['right_eye'][0][0]+rightEyeLength/2, face_landmarks['right_eye'][0][1])],
           fill=(150, 0, 0, 64), width=lineWidth)
    d.ellipse((face_landmarks['left_eye'][3][0]-leftEyeLength/2 - ellipsRad, face_landmarks['left_eye'][3][1] - ellipsRad,
               face_landmarks['left_eye'][3][0]-leftEyeLength/2 + ellipsRad, face_landmarks['left_eye'][3][1] + ellipsRad),
              fill="red", outline="red")
    d.ellipse((face_landmarks['right_eye'][0][0]+rightEyeLength/2 - ellipsRad, face_landmarks['right_eye'][0][1] - ellipsRad,
               face_landmarks['right_eye'][0][0]+rightEyeLength/2 + ellipsRad, face_landmarks['right_eye'][0][1] + ellipsRad),
              fill="red", outline="red")
    # Forehead
    d.line([(face_landmarks['nose'][0][0], redPoint[1]), (face_landmarks['nose'][0][0], face_landmarks['nose'][0][1])],
           fill=(150, 0, 0, 64), width=lineWidth)
    d.ellipse((face_landmarks['nose'][0][0] - ellipsRad, redPoint[1] - ellipsRad,
               face_landmarks['nose'][0][0] + ellipsRad, redPoint[1] + ellipsRad),
              fill="red", outline="red")
    d.ellipse((face_landmarks['nose'][0][0] - ellipsRad, face_landmarks['nose'][0][1] - ellipsRad,
               face_landmarks['nose'][0][0] + ellipsRad, face_landmarks['nose'][0][1] + ellipsRad),
              fill="red", outline="red")
    # Nose
    d.line([(face_landmarks['nose'][0][0], face_landmarks['nose'][0][1]), (face_landmarks['nose'][0][0], face_landmarks['nose_tip'][3][1])],
           fill=(150, 0, 0, 64), width=lineWidth)
    d.ellipse((face_landmarks['nose'][0][0] - ellipsRad, face_landmarks['nose_tip'][3][1] - ellipsRad,
               face_landmarks['nose'][0][0] + ellipsRad, face_landmarks['nose_tip'][3][1] + ellipsRad), fill="red", outline="red")
    # Cheekbones
    d.line([(face_landmarks['cheekbones'][0][0], face_landmarks['cheekbones'][0][1]),
            (face_landmarks['cheekbones'][1][0], face_landmarks['cheekbones'][0][1])],
           fill=(150, 0, 0, 64), width=lineWidth)
    d.ellipse((face_landmarks['cheekbones'][0][0] - ellipsRad, face_landmarks['cheekbones'][0][1] - ellipsRad,
               face_landmarks['cheekbones'][0][0] + ellipsRad, face_landmarks['cheekbones'][0][1] + ellipsRad),
              fill="red", outline="red")
    d.ellipse((face_landmarks['cheekbones'][1][0] - ellipsRad, face_landmarks['cheekbones'][0][1] - ellipsRad,
               face_landmarks['cheekbones'][1][0] + ellipsRad, face_landmarks['cheekbones'][0][1] + ellipsRad),
              fill="red", outline="red")
    # Lips
    d.line([(face_landmarks['nose'][0][0], face_landmarks['lips_middle'][0][1]),
            (face_landmarks['nose'][0][0], face_landmarks['border'][1][1])],
           fill=(150, 0, 0, 64), width=lineWidth)
    d.ellipse((face_landmarks['nose'][0][0] - ellipsRad, face_landmarks['lips_middle'][0][1] - ellipsRad,
               face_landmarks['nose'][0][0] + ellipsRad, face_landmarks['lips_middle'][0][1] + ellipsRad),
              fill="red", outline="red")
    d.ellipse((face_landmarks['nose'][0][0] - ellipsRad, face_landmarks['border'][1][1] - ellipsRad,
               face_landmarks['nose'][0][0] + ellipsRad, face_landmarks['border'][1][1] + ellipsRad),
              fill="red", outline="red")
    # All length (drawn slightly to the right so it doesn't overlap)
    d.line([(face_landmarks['nose'][0][0] + lineWidth*3, redPoint[1]), (face_landmarks['nose'][0][0] + lineWidth*3, face_landmarks['border'][1][1])],
           fill=(150, 0, 0, 64), width=lineWidth)
    d.ellipse((face_landmarks['nose'][0][0] + lineWidth*3 - ellipsRad, redPoint[1] - ellipsRad,
               face_landmarks['nose'][0][0] + lineWidth*3 + ellipsRad, redPoint[1] + ellipsRad),
              fill="red", outline="red")
    d.ellipse((face_landmarks['nose'][0][0] + lineWidth*3 - ellipsRad, face_landmarks['border'][1][1] - ellipsRad,
               face_landmarks['nose'][0][0] + lineWidth*3 + ellipsRad, face_landmarks['border'][1][1] + ellipsRad),
              fill="red", outline="red")
    print("----------------------------------------------------------------------------\n")
    # Emotion scores — magic coefficients presumably come from the
    # author's model; TODO confirm their source.
    interested = 0.357*nsi + 0.544/puti
    happy = 0.4*pui + 0.285*stoi + 0.409*puti
    impressed = 0.386*nsi + 0.432/puti
    sad = 0.356/pui + 0.423/stoi
    despised = 0.287/nsi
    scared = 0.499*nsi + 0.317/stoi + 0.495/puti
    guilty = 0.435*nsi +0.472/stoi
    print("interested: ", interested)
    print("happy: ", happy)
    print("impressed: ", impressed)
    print("sad: ", sad)
    print("despised: ", despised)
    print("scared: ", scared)
    print("guilty: ", guilty)
    # Reference values computed from baseline ratios (0.33/0.5/0.25).
    interested0 = 0.357 * 0.33 + 0.544 / 0.33
    happy0 = 0.4 * 0.5 + 0.285 * 0.25 + 0.409 * 0.33
    impressed0 = 0.386 * 0.33 + 0.432 / 0.33
    sad0 = 0.356 / 0.5 + 0.423 / 0.25
    despised0 = 0.287 / 0.33
    scared0 = 0.499 * 0.33 + 0.317 / 0.25 + 0.495 / 0.33
    guilty0 = 0.435 * 0.33 + 0.472 / 0.25
    # Deviations from the reference values, in percent.
    devInterested = 100 * (1 - interested/interested0)
    devHappy = 100 * (1 - happy/happy0)
    devImpressed = 100 * (1 - impressed/impressed0)
    devSad = 100 * (1 - sad/sad0)
    devDespised = 100 * (1 - despised/despised0)
    devScared = 100 * (1 - scared/scared0)
    devGuilty = 100 * (1 - guilty/guilty0)
    print("")
    print("devInterested: ", devInterested, "%")
    print("devHappy: ", devHappy, "%")
    print("devImpressed: ", devImpressed, "%")
    print("devSad: ", devSad, "%")
    print("devDespised: ", devDespised, "%")
    print("devScared: ", devScared, "%")
    print("devGuilty: ", devGuilty, "%")
    pil_image.show()
class Widget(QWidget):
    """Main window: shows the face image and lets the user paint a red
    marker point before running the analysis.

    Note: the class body previously defined ``paintEvent`` twice; only
    the last definition is kept by Python, so the first one (and its
    helper ``drawText``, which read the nonexistent ``self.text``) was
    dead code and has been removed.
    """
    def __init__(self):
        super().__init__()
        self.setWindowTitle('Analyser')
        # Last mouse position while the button is held; None when idle.
        self.point = None
        hbox = QVBoxLayout(self)
        pixmap = QPixmap(nameImage)
        self.lbl = QLabel(self)
        self.lbl.setPixmap(pixmap)
        hbox.addWidget(self.lbl)
        self.setLayout(hbox)
        btn1 = QPushButton("Готово", self)
        hbox.addWidget(btn1)
        btn2 = QPushButton("Очистить", self)
        hbox.addWidget(btn2)
        btn1.clicked.connect(self.button1Clicked)
        btn2.clicked.connect(self.button2Clicked)
    def button1Clicked(self):
        """'Done': save the marked-up pixmap as PNG and run the analysis."""
        newName = "C:\\Users\\Nika Kim\\Desktop\\faces\\K.png"
        self.lbl.pixmap().save(newName, 'png')
        analys(newName)
        self.close()
    def button2Clicked(self):
        """'Clear': restore the original, unmarked image."""
        pixmap = QPixmap(nameImage)
        self.lbl.setPixmap(pixmap)
    def mousePressEvent(self, event):
        self.point = event.pos()
        # Trigger a repaint of the widget so the point gets drawn.
        self.update()
    def mouseReleaseEvent(self, event):
        self.point = None
    def paintEvent(self, event):
        super().paintEvent(event)
        # Nothing to draw unless the mouse button is currently held.
        if not self.point:
            return
        # Paint directly onto the label's pixmap so the marker persists
        # and is included when the pixmap is saved.
        painter = QPainter(self.lbl.pixmap())
        painter.setPen(QPen(Qt.red, 10.0))
        painter.drawPoint(self.point)
# Run the GUI for each detected face. NOTE(review): sys.exit() terminates
# the process when the first window's event loop ends, so only the first
# face in face_landmarks_list is ever processed; analys() reads the loop
# variable ``face_landmarks`` as a module-level global.
for face_landmarks in face_landmarks_list:
    app = QApplication(sys.argv)
    ex = Widget()
    ex.show()
    sys.exit(app.exec_())
|
# -*- coding: utf-8 -*-
import os, cv2
import numpy as np
import matplotlib.pyplot as plt
import pickle
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
# No-op expression; useful only when run interactively to check the TF version.
tf.__version__
# Directory layout: <cwd>/data/<class>/<images>; TensorBoard artifacts go
# to <cwd>/embedding-logs.
PATH = os.getcwd()
LOG_DIR = PATH + '/embedding-logs'
# metadata = os.path.join(LOG_DIR, 'metadata2.tsv')
# %%
data_path = PATH + '/data'
data_dir_list = os.listdir(data_path)
img_data = []
# Load every image of every class folder, resized to 224x224
# (BGR channel order, as returned by cv2.imread).
for dataset in data_dir_list:
    img_list = os.listdir(data_path + '/' + dataset)
    print('Loaded the images of dataset-' + '{}\n'.format(dataset))
    for img in img_list:
        input_img = cv2.imread(data_path + '/' + dataset + '/' + img)
        input_img_resize = cv2.resize(input_img, (224, 224))
        img_data.append(input_img_resize)
img_data = np.array(img_data)
# %%
# Load the pre-computed CNN feature vectors (one row per image).
feature_vectors = np.loadtxt('feature_vectors_400_samples.txt')
print("feature_vectors_shape:", feature_vectors.shape)
print("num of images:", feature_vectors.shape[0])
print("size of individual feature vector:", feature_vectors.shape[1])
num_of_samples = feature_vectors.shape[0]
num_of_samples_each_class = 100
# The embedding tensor that TensorBoard's projector will visualize.
features = tf.Variable(feature_vectors, name='features')
# Class labels: assumes the rows are ordered cats, dogs, horses, humans
# with 100 samples each — TODO confirm against the data folders.
y = np.ones((num_of_samples,), dtype='int64')
y[0:100] = 0
y[100:200] = 1
y[200:300] = 2
y[300:] = 3
names = ['cats', 'dogs', 'horses', 'humans']
# with open(metadata, 'w') as metadata_file:
#     for row in range(210):
#         c = y[row]
#         metadata_file.write('{}\n'.format(c))
# Write the projector metadata: tab-separated class index (1-4) and name.
metadata_file = open(os.path.join(LOG_DIR, 'metadata_4_classes.tsv'), 'w')
metadata_file.write('Class\tName\n')
k = 100  # num of samples in each class
j = 0
# for i in range(210):
#     metadata_file.write('%06d\t%s\n' % (i, names[y[i]]))
for i in range(num_of_samples):
    c = names[y[i]]
    # j advances by one at the start of each class block of k samples.
    if i % k == 0:
        j = j + 1
    metadata_file.write('{}\t{}\n'.format(j, c))
    # metadata_file.write('%06d\t%s\n' % (j, c))
metadata_file.close()
# Taken from: https://github.com/tensorflow/tensorflow/issues/6322
def images_to_sprite(data):
    """Creates the sprite image along with any necessary padding

    Each image is min/max-normalised independently, the set is padded
    with black thumbnails up to the next square count, and the images
    are tiled into an n x n grid.

    Args:
      data: NxHxW[x3] tensor containing the images.
    Returns:
      data: Properly shaped (n*H)x(n*W)x3 uint8 image with any
      necessary padding.
    """
    # Grayscale input: replicate the single channel to RGB.
    if len(data.shape) == 3:
        data = np.tile(data[..., np.newaxis], (1, 1, 1, 3))
    data = data.astype(np.float32)
    # Per-image min/max normalisation to [0, 1].
    # (Renamed from `min`/`max`, which shadowed the builtins.)
    img_min = np.min(data.reshape((data.shape[0], -1)), axis=1)
    data = (data.transpose(1, 2, 3, 0) - img_min).transpose(3, 0, 1, 2)
    img_max = np.max(data.reshape((data.shape[0], -1)), axis=1)
    data = (data.transpose(1, 2, 3, 0) / img_max).transpose(3, 0, 1, 2)
    # Inverting the colors seems to look better for MNIST
    # data = 1 - data
    # Pad up to a perfect square count of images with black thumbnails.
    n = int(np.ceil(np.sqrt(data.shape[0])))
    padding = ((0, n ** 2 - data.shape[0]), (0, 0),
               (0, 0)) + ((0, 0),) * (data.ndim - 3)
    data = np.pad(data, padding, mode='constant',
                  constant_values=0)
    # Tile the individual thumbnails into an image.
    data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3)
                                                           + tuple(range(4, data.ndim + 1)))
    data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])
    data = (data * 255).astype(np.uint8)
    return data
# %%
# Build the sprite sheet and write it where the projector config points.
sprite = images_to_sprite(img_data)
cv2.imwrite(os.path.join(LOG_DIR, 'sprite_4_classes.png'), sprite)
# scipy.misc.imsave(os.path.join(LOG_DIR, 'sprite.png'), sprite)
# %%
# Save the embedding variable and write the projector config that
# TensorBoard reads at startup.
with tf.Session() as sess:
    saver = tf.train.Saver([features])
    sess.run(features.initializer)
    saver.save(sess, os.path.join(LOG_DIR, 'images_4_classes.ckpt'))
    config = projector.ProjectorConfig()
    # One can add multiple embeddings.
    embedding = config.embeddings.add()
    embedding.tensor_name = features.name
    # Link this tensor to its metadata file (e.g. labels).
    embedding.metadata_path = os.path.join(LOG_DIR, 'metadata_4_classes.tsv')
    # Comment out if you don't want sprites
    embedding.sprite.image_path = os.path.join(LOG_DIR, 'sprite_4_classes.png')
    # Thumbnails are square: one image's width in both dimensions.
    embedding.sprite.single_image_dim.extend([img_data.shape[1], img_data.shape[1]])
    # Saves a config file that TensorBoard will read during startup.
    projector.visualize_embeddings(tf.summary.FileWriter(LOG_DIR), config)
############################################################################
#
# Copyright (c) 2001, 2002, 2004 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
############################################################################
"""``transaction`` module: Exported transaction functions.
"""
#: Default implementation of `~ITransaction`
from transaction._transaction import Transaction # noqa: F401 unused import
#: Default implementation of `~ISavepoint`
from transaction._transaction import Savepoint # noqa: F401 unused import
#: A single-threaded `~ITransactionManager`
from transaction._manager import TransactionManager # noqa: F401 unused import
#: A thread-safe `~ITransactionManager`
from transaction._manager import ThreadTransactionManager
# NB: "with transaction:" does not work under Python 3 because they worked
# really hard to break looking up special methods like __enter__ and __exit__
# via getattr and getattribute; see http://bugs.python.org/issue12022. On
# Python 3, you must use ``with transaction.manager`` instead.
#: The default transaction manager (a `~.ThreadTransactionManager`). All other
#: functions in this module refer to this object.
manager = ThreadTransactionManager()
#: See `.ITransactionManager.get`
#: (``__enter__`` is aliased to ``get`` so the module itself can act as a
#: context manager where the runtime allows it; see the note above).
get = __enter__ = manager.get
#: See `.ITransactionManager.begin`
begin = manager.begin
#: See `.ITransactionManager.commit`
commit = manager.commit
#: See `.ITransactionManager.abort`
abort = manager.abort
#: Module-level ``__exit__`` counterpart to ``__enter__`` above.
__exit__ = manager.__exit__
#: See `.ITransactionManager.doom`
doom = manager.doom
#: See `.ITransactionManager.isDoomed`
isDoomed = manager.isDoomed
#: See `.ITransactionManager.savepoint`
savepoint = manager.savepoint
#: See `.ITransactionManager.attempts`
attempts = manager.attempts
|
import pika
import os
import asyncio
import concurrent.futures
import uuid
import time
from PikaBus import PikaBusSetup
from PikaBus import PikaErrorHandler
def GetDefaultConnectionParams():
    """Build pika connection parameters for the test broker.

    Targets host ``rabbitmq`` when RUNNING_IN_CONTAINER=true, otherwise
    ``localhost``; always port 5672, vhost '/', user/pass amqp/amqp.
    """
    in_container = os.getenv('RUNNING_IN_CONTAINER', 'false') == 'true'
    broker_host = 'rabbitmq' if in_container else 'localhost'
    return pika.ConnectionParameters(
        host=broker_host,
        port=5672,
        virtual_host='/',
        credentials=pika.PlainCredentials('amqp', 'amqp'))
def GetRandomQueue(prefix: str = 'test'):
    """Return a unique queue name of the form ``pika-<prefix>-<uuid1>``.

    (Local renamed from ``id``, which shadowed the builtin.)
    """
    unique_id = str(uuid.uuid1())
    return f'pika-{prefix}-{unique_id}'
def GetRandomTopic():
    """Return a unique topic name of the form ``pika-topic-<uuid1>``.

    (Local renamed from ``id``, which shadowed the builtin.)
    """
    unique_id = str(uuid.uuid1())
    return f'pika-topic-{unique_id}'
def GetPikaBusSetup(listenerQueue: str = None, connParams: pika.ConnectionParameters = None, errorQueue: str = 'error', topics: list = None):
    """Create a PikaBusSetup for tests.

    :param listenerQueue: default listener queue name, or None.
    :param connParams: connection parameters; defaults to the local broker.
    :param errorQueue: queue receiving failed messages (max 1 retry).
    :param topics: default subscriptions; defaults to an empty list.
        (Changed from a mutable ``[]`` default to the None sentinel.)
    """
    if topics is None:
        topics = []
    if connParams is None:
        connParams = GetDefaultConnectionParams()
    pikaErrorHandler = PikaErrorHandler.PikaErrorHandler(errorQueue=errorQueue, maxRetries=1)
    return PikaBusSetup.PikaBusSetup(connParams,
                                     defaultListenerQueue=listenerQueue,
                                     defaultSubscriptions=topics,
                                     pikaErrorHandler=pikaErrorHandler,
                                     retryParams={'tries': 10})
def GetPayload(id = None, failing = False, reply = False, topic = ''):
    """Build a test message payload.

    A fresh uuid1-based id is generated when none is supplied.
    """
    message_id = str(uuid.uuid1()) if id is None else id
    return {
        'id': message_id,
        'failing': failing,
        'reply': reply,
        'topic': topic,
    }
def CompleteTask(tasks: list):
    """Run all given awaitables to completion on the current event loop."""
    asyncio.get_event_loop().run_until_complete(asyncio.gather(*tasks))
def WaitUntilRabbitLives(connParams: pika.ConnectionParameters = None):
    """Block until a rabbitmq connection succeeds.

    Polls once per second for up to 30 attempts, then raises.
    :raises Exception: when the broker never becomes reachable.
    """
    if connParams is None:
        connParams = GetDefaultConnectionParams()
    maxTries = 30
    for _ in range(maxTries):
        try:
            with pika.BlockingConnection(connParams) as connection:
                channel = connection.channel()
                channel.close()
            return
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # still propagate while connection failures are retried.
        except Exception:
            time.sleep(1)
    raise Exception("Cannot connect to rabbitmq!")
"""
Module for parsing exceptions
"""
import utils
# CLASSES =====================================================================
class CartographyParserException(Exception):
    """Raised when a parsed line does not match the expected pattern."""

    # Constructor -------------------------------------------------------------
    def __init__(self, row: int, value: str, inv_type: str, pattern: str):
        message = 'Invalid {}: <{}> (l.{}). Expected: <{}> (case insensitive)'.format(
            inv_type,
            utils.io.file.format_line_for_logging(value),
            row,
            pattern,
        )
        super().__init__(message)
|
"""This module contains the test cases for the middlewares of the ``scrapy_selenium`` package"""
from unittest.mock import patch
from contextlib import ExitStack
from scrapy import Request
from scrapy.crawler import Crawler
from scrapy_selenium.http import SeleniumRequest
from scrapy_selenium.middlewares import SeleniumMiddleware
from .test_cases import BaseScrapySeleniumTestCase
class SeleniumMiddlewareTestCase(BaseScrapySeleniumTestCase):
    """Test case for the ``SeleniumMiddleware`` middleware"""
    @classmethod
    def setUpClass(cls):
        """Initialize the middleware"""
        super().setUpClass()
        # A throwaway crawler just to feed spider class/settings into
        # SeleniumMiddleware.from_crawler().
        crawler = Crawler(
            spidercls=cls.spider_klass,
            settings=cls.settings
        )
        cls.selenium_middleware = SeleniumMiddleware.from_crawler(crawler)
    @classmethod
    def tearDownClass(cls):
        """Close the selenium webdriver"""
        super().tearDownClass()
        # Drain the driver pool and quit each driver so no browser
        # processes leak across test runs.
        while not(cls.selenium_middleware.driver_queue.empty()):
            cls.selenium_middleware.driver_queue.get().quit()
    def test_from_crawler_method_should_initialize_the_driver(self):
        """Test that the ``from_crawler`` method should initialize the selenium driver"""
        crawler = Crawler(
            spidercls=self.spider_klass,
            settings=self.settings
        )
        selenium_middleware = SeleniumMiddleware.from_crawler(crawler)
        # The driver_queue must be initialized
        self.assertIsNotNone(selenium_middleware.driver_queue)
        # Each driver in the queue must be initialized:
        for i in range(0,selenium_middleware.driver_queue.qsize()):
            # queue is FIFO, so this should test all of them.
            driver = selenium_middleware.driver_queue.get()
            self.assertIsNotNone(driver)
            selenium_middleware.driver_queue.put(driver)
        # Test all of the drivers in the queue
        # (each one is exercised against a live site and put back, so the
        # queue size is stable across iterations).
        for i in range(0, selenium_middleware.driver_queue.qsize()):
            driver = selenium_middleware.driver_queue.get()
            driver.get('http://www.python.org')
            self.assertIsNotNone(driver)
            self.assertIn('Python', driver.title)
            selenium_middleware.driver_queue.put(driver)
        # Finally close every driver this test created; they are not put
        # back, which empties the queue of this local middleware.
        for i in range(0, selenium_middleware.driver_queue.qsize()):
            driver = selenium_middleware.driver_queue.get()
            driver.close()
    def test_spider_closed_should_close_the_driver(self):
        """Test that the ``spider_closed`` method should close the driver"""
        crawler = Crawler(
            spidercls=self.spider_klass,
            settings=self.settings
        )
        selenium_middleware = SeleniumMiddleware.from_crawler(crawler)
        with ExitStack() as stack:
            # Patch quit() on every driver currently in the pool.
            # NOTE(review): get() removes each driver from the queue and
            # they are never put back before spider_closed() runs —
            # verify spider_closed tracks its drivers independently of
            # the queue, otherwise these mocks may never be called.
            mocked_quits = []
            for i in range(0, selenium_middleware.driver_queue.qsize()):
                mocked_quits.append(stack.enter_context(patch.object(selenium_middleware.driver_queue.get(), 'quit')))
            selenium_middleware.spider_closed()
            for mocked_quit in mocked_quits:
                mocked_quit.assert_called_once()
    def test_process_request_should_return_none_if_not_selenium_request(self):
        """Test that the ``process_request`` should return none if not selenium request"""
        # A plain scrapy Request must pass through untouched (None lets
        # scrapy continue with its default download handling).
        scrapy_request = Request(url='http://not-an-url')
        self.assertIsNone(
            self.selenium_middleware.process_request(
                request=scrapy_request,
                spider=None
            )
        )
    def test_process_request_should_return_a_response_if_selenium_request(self):
        """Test that the ``process_request`` should return a response if selenium request"""
        selenium_request = SeleniumRequest(url='http://www.python.org')
        html_response = self.selenium_middleware.process_request(
            request=selenium_request,
            spider=None
        )
        # The driver assigned to this request is no longer in the driver queue
        # NOTE(review): ``assertNotIn`` needs membership support on
        # driver_queue — confirm its type implements ``in``.
        self.assertNotIn(
            html_response.meta['driver'],
            self.selenium_middleware.driver_queue
        )
        # We also have access to the "selector" attribute on the response
        self.assertEqual(
            html_response.selector.xpath('//title/text()').extract_first(),
            'Welcome to Python.org'
        )
    def test_process_request_should_return_a_screenshot_if_screenshot_option(self):
        """Test that the ``process_request`` should return a response with a screenshot"""
        selenium_request = SeleniumRequest(
            url='http://www.python.org',
            screenshot=True
        )
        html_response = self.selenium_middleware.process_request(
            request=selenium_request,
            spider=None
        )
        # screenshot=True must populate response.meta['screenshot'].
        self.assertIsNotNone(html_response.meta['screenshot'])
    def test_process_request_should_execute_script_if_script_option(self):
        """Test that the ``process_request`` should execute the script and return a response"""
        selenium_request = SeleniumRequest(
            url='http://www.python.org',
            script='document.title = "scrapy_selenium";'
        )
        html_response = self.selenium_middleware.process_request(
            request=selenium_request,
            spider=None
        )
        # The injected script ran before the page source was captured.
        self.assertEqual(
            html_response.selector.xpath('//title/text()').extract_first(),
            'scrapy_selenium'
        )
|
from __future__ import division
from pylab import *
import elution as el
import utils as ut
import orth
import collections
import scipy
def prot_counts(fs, min_count=2):
    """
    Sum up all the spectral counts for all the proteins in a set of
    fractionations.
    Filtered s.t. any returned protein will have at least min_count counts in
    one fraction of one of the fractionations.
    Each fractionation's counts are normalized by its total and by the
    number of fractionations.
    Return a dict: {prot1:count1, prot2:count2, ...}
    """
    keep = el.all_prots(fs, min_count=min_count)
    totals = collections.defaultdict(float)
    for fname in fs:
        elut = el.load_elution(fname)
        row_sums = np.sum(np.array(elut.mat), axis=1)
        weight = 1 / (sum(row_sums) * len(fs))
        for prot, row_sum in zip(elut.prots, row_sums):
            if prot in keep:
                totals[prot] += row_sum * weight
    return totals
def prot_conservation(fs, sp1, sp2, gridsize=30, od11=None, return_data=False,
        filter_both=True, use_title=True, extent=None, fontsize=18,
        **kwargs):
    """
    Hexbin-plot log2 normalized spectral counts of sp1 vs sp2 orthologs.

    Currently only uses 1 to 1 orthologs, so odict should be a simple flat dict
    of genesa:genesb.

    Fixes: parenthesized the Py2-only bare tuples in the comprehensions
    (same behavior, also valid on Python 3), ``od11 is None`` instead of
    ``== None``, and the mutable default for ``extent``.
    """
    if sp1 == sp2:
        return
    if extent is None:
        extent = [-22, -6, -22, -6]
    # Fractionation files are grouped by their 2-letter species prefix.
    fs1, fs2 = [[f for f in fs if ut.shortname(f)[:2] == sp] for sp in (sp1, sp2)]
    odict = orth.odict_1to1(sp1, sp2) if od11 is None else od11
    pc1_all, pc2_all = [prot_counts(fset) for fset in (fs1, fs2)]
    if filter_both:
        # Keep only ortholog pairs observed (count > 0) in both species.
        ps_use = [p for p in odict if (pc1_all[p] > 0 and pc2_all[odict[p]] > 0)]
    else:
        ps_use = [p for p in pc1_all if p in odict]
    pc1, pc2 = zip(*[(pc1_all[p], pc2_all[odict[p]]) for p in ps_use])
    logpc1, logpc2 = [np.log2(pc) for pc in (pc1, pc2)]
    # Dotted diagonal as a reference line, then the density plot.
    plot(extent[:2], extent[2:], 'k:', linewidth=1)
    hexbin(logpc1, logpc2, gridsize=gridsize, **kwargs)
    rval = scipy.stats.spearmanr(pc1, pc2)[0]
    annotate('R=%0.2f\nN=%s' % (rval, len(pc1)), xy=(.05, .7),
            xycoords=("axes fraction"), fontsize=fontsize)
    if return_data:
        return pc1, pc2
def plot_conservations(fs, sp, othersps, **kwargs):
    """Plot prot_conservation of sp against each other species, laid out
    in a two-row subplot grid with tick labels hidden."""
    ncols = np.ceil(len(othersps) / 2)
    for idx, other_sp in enumerate(othersps):
        subplot(2, ncols, idx + 1)
        prot_conservation(fs, sp, other_sp, **kwargs)
        grid("off")
        gca().xaxis.set_ticklabels([])
        gca().yaxis.set_ticklabels([])
        draw()
def all_by_all_species(fs, sps, extent=None, fontsize=18, **kwargs):
    """
    Plot the full species-by-species grid of count-conservation hexbins,
    annotating each off-diagonal panel with Spearman R and pair count.

    Fixes: the Py2-only ``print s1, s2, ...`` statement is replaced with
    a single formatted string that prints identically on Python 2 and 3,
    and the mutable default for ``extent`` is replaced with None.
    """
    if extent is None:
        extent = [-22, -6, -22, -6]
    nsps = len(sps)
    kwargs["extent"] = extent
    for j, s1 in enumerate(sps):
        for i, s2 in enumerate(sps):
            # progress: which panel of the grid is being drawn
            print('{} {} {}'.format(s1, s2, j * nsps + i + 1))
            subplot(nsps, nsps, j * nsps + i + 1)
            if s1 != s2:
                plot(extent[:2], extent[2:], 'k:', linewidth=1)
                result = prot_conservation(fs, s1, s2, use_title=False,
                        return_data=True, **kwargs)
                if result:
                    pc1, pc2 = result
                    rval = scipy.stats.spearmanr(pc1, pc2)[0]
                    annotate('R=%0.2f\nN=%s' % (rval, len(pc1)), xy=(.05, .7),
                            xycoords=("axes fraction"), fontsize=fontsize)
            grid("off")
            gca().xaxis.set_ticklabels([])
            gca().yaxis.set_ticklabels([])
            draw()
def hist_prot_counts(fs, in_prots_sets=[None], **kwargs):
    """
    Histogram log2 protein counts, once per provided protein set; the
    filtering is cumulative across the sets (each truthy set further
    restricts the pairs).
    Usually set linewidth=3, histtype='step', range=(0,18), bins=36
    """
    pairs = prot_counts(fs).items()
    for keep in in_prots_sets:
        if keep:
            pairs = [(p, c) for p, c in pairs if p in keep]
        hist(np.log2(ut.i1(pairs)), **kwargs)
|
import csv
import numpy as np
from datetime import datetime
import matplotlib as mpl
import matplotlib.pyplot as plt
from numpy import genfromtxt
import csv
import numpy as np
from datetime import datetime
import matplotlib as mpl
from numpy import genfromtxt
# Raw samples parsed from the CSV capture.
times = []
accelerometer = []  # NOTE(review): never populated below — appears unused
aX = []
aY = []
aZ = []
vX = []
vY = []
vZ = []
# Parse the capture: column 0 is a timestamp; columns 11-13 are taken as
# the accelerometer x/y/z values — TODO confirm against the CSV header.
with open("../Left hand/Static/no_movement2.csv") as file:
    csvObj = csv.reader(file, delimiter=',')
    cnt = 0
    for row in csvObj:
        if cnt > 0:
            # strip the trailing timezone suffix, convert to epoch milliseconds
            row[0] = datetime.strptime(row[0][0:-6], '%Y-%m-%d %H:%M:%S.%f').timestamp() * 1000
        cnt += 1
        # NOTE(review): the header row (cnt == 0) is appended too, so
        # times[0] still holds raw header strings; later loops start at
        # index 2 and never touch it.
        times.append(row)
        x=0
        try:
            x = float(row[11])
        except:
            # unparsable value: keep 0 and print a nearby column for debugging
            print(row[2])
        y=0
        try:
            y = float(row[12])
        except:
            print(row[3])
        z=0
        try:
            z = float(row[13])
        except:
            print(row[4])
        aX.append(x)
        aY.append(y)
        aZ.append(z)
# Flatten out just the timestamps (index 0 of each row).
timestamps = []
for row in times:
    timestamps.append(row[0])
dT = []
obj3 = []  # NOTE(review): never used below
obj4 = []  # NOTE(review): never used below
start = 240000  # NOTE(review): unused after the commented line below
allTimes = []
size = 5000  # NOTE(review): unused below
# moveAvg2 = diff = times[start][0] - times[start-1][0]
beta = 0.95  # exponential moving-average smoothing factor
# Compute per-sample time deltas; stop at a gap larger than 5 s
# (presumably a recording break — TODO confirm).
for j in range(2, len(times)):
    diff = times[j][0] - times[j-1][0]
    dT.append(diff)
    if(diff > 5000):
        break
    allTimes.append(times[j][0])
    # acceleration * dt — building blocks for the moving average below
    vX.append(diff*aX[j])
    vY.append(diff*aY[j])
    vZ.append(diff*aZ[j])
movingAvgX = vX[0]
movingAvgY = vY[0]
movingAvgZ = vZ[0]
avgsX = []
avgsY = []
avgsZ = []
# NOTE(review): movingAvg{X,Y,Z} are never updated inside the loop, so
# each "average" is just a fixed blend of the first and current samples.
for j in range(0, len(vX)):
    avgX = movingAvgX*beta + vX[j]*(1-beta)
    avgY = movingAvgY*beta + vY[j]*(1-beta)
    avgZ = movingAvgZ*beta + vZ[j]*(1-beta)
    # constant offsets — presumably bias/gravity compensation; TODO confirm
    avgsX.append(avgX + 10)
    avgsY.append(avgY + 7)
    avgsZ.append(avgZ + 80.6)
# calculate velocity by integrating acceleration over each time step
uX = 0
uY = 0
uZ = 0
# vX/vY/vZ are re-bound here; the values built in the previous loop are discarded.
vX = []
vY = []
vZ = []
velocity = []
for i in range(0, len(dT)):
    # print(aX[i])
    # print(dT[i])
    # print(f"{aX[i]}, {aY[i]}, {aZ[i]}")
    # 0.980665 looks like a gravity-related constant (g/10) — TODO confirm
    print(f"{aX[i]+aY[i]+aZ[i]+0.980665}")
    # dT is in milliseconds, hence /1000 to integrate in seconds
    uX += aX[i]*(dT[i]/1000)
    uY += aY[i]*(dT[i]/1000)
    uZ += aZ[i]*(dT[i]/1000)
    vX.append(uX)
    vY.append(uY)
    vZ.append(uZ)
    # NOTE(review): component sum, not a vector magnitude — presumably a
    # rough signal; verify this is intentional.
    velocity.append(uX+uY+uZ)
    # print(f"{uX}, {uY}, {uZ}")
print("done")
# Sample window to plot.
low = 1
high = 500
# plt.figure(0)
# plt.plot(allTimes[low:high], avgsX[low:high], "r")
# plt.plot(allTimes[low:high], avgsY[low:high], "g")
# plt.plot(allTimes[low:high], avgsZ[low:high], "b")
# plt.ylabel('acceleration')
# plt.title("accelerometer moving average")
# Raw accelerometer traces: x red, y green, z blue.
plt.figure(1)
plt.plot(timestamps[low:high], aX[low:high], "r")
plt.plot(timestamps[low:high], aY[low:high], "g")
plt.plot(timestamps[low:high], aZ[low:high], "b")
plt.ylabel('acceleration')
plt.title("raw accelerometer")
# plt.figure(2)
# plt.title("time since last sample")
# plt.plot(timestamps[low:high], dT[low:high], "r")
# velocity plot
plt.figure(3)
plt.plot(timestamps[low:high], velocity[low:high], "r")
plt.ylabel('velocity')
plt.title("velocity")
plt.show()
|
from django.shortcuts import render,redirect
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from .models import Project,Profile,Review,NewsLetterRecipients
from .forms import NewsLetterForm,RegistrationForm,ReviewForm,ProfileForm,ProjectForm
from django.contrib import messages
from .email import send_welcome_email
from django.http import JsonResponse
from rest_framework.response import Response
from rest_framework.views import APIView
from .serializer import ProjectSerializer,ProfileSerializer
from rest_framework import status
from .permissions import IsAdminOrReadOnly
# Create your views here.
def register(request):
    """Handle user sign-up.

    POST with a valid form creates the account and redirects to login;
    otherwise (GET, or invalid POST) the form is rendered.
    """
    if request.method != 'POST':
        return render(request, 'registration/register.html', {'form': RegistrationForm()})
    form = RegistrationForm(request.POST)
    if form.is_valid():
        form.save()
        username = form.cleaned_data.get('username')
        messages.success(request, f'Thank you, {username}. Your account has been created')
        return redirect('login')
    # invalid submission: re-render with the bound form so errors show
    return render(request, 'registration/register.html', {'form': form})
# @login_required(login_url='/accounts/login/')
def welcome(request):
    """Landing page: all project images plus the newsletter signup form."""
    context = {
        "all_images": Project.fetch_all_images(),
        "letterForm": NewsLetterForm(),
    }
    return render(request, 'index.html', context)
#Ajax functionality
def newsletter(request):
    """Ajax endpoint: store a newsletter recipient, send the welcome
    email, and return a JSON success message."""
    name = request.POST.get('your_name')
    email = request.POST.get('email')
    NewsLetterRecipients(name=name, email=email).save()
    send_welcome_email(name, email)
    return JsonResponse({'success': 'You have been successfully added to mailing list'})
def search_project(request):
    """Search projects by title; render results, or a placeholder
    message when no (non-empty) query was given."""
    search_term = request.GET.get("project")
    if search_term:
        searched_projects = Project.search_project_by_title(search_term)
        return render(request, 'search.html',
                      {"message": search_term, "projects": searched_projects})
    return render(request, 'search.html', {"message": "No search results yet!"})
# def project(request, id):
# try:
# project = Project.objects.get(pk = id)
# except DoesNotExist:
# raise Http404()
# current_user = request.user
# comments = Review.get_comment(Review, id)
# latest_review_list=Review.objects.all()
# if request.method == 'POST':
# form = ReviewForm(request.POST)
# if form.is_valid():
# design_rating = form.cleaned_data['design_rating']
# content_rating = form.cleaned_data['content_rating']
# usability_rating = form.cleaned_data['usability_rating']
# comment = form.cleaned_data['comment']
# review = Review()
# review.project = project
# review.user = current_user
# review.comment = comment
# review.design_rating = design_rating
# review.content_rating = content_rating
# review.usability_rating = usability_rating
# review.save()
# else:
# form = ReviewForm()
# return render(request, 'review_project.html', {"project": project,
# 'form':form,
# 'comments':comments,
# 'latest_review_list':latest_review_list})
# @login_required(login_url='/accounts/login/')
def profile(request):
    """Show the logged-in user's profile and projects; redirect to
    profile creation when no profile exists yet."""
    current_user = request.user
    # NOTE(review): the filter keyword is capitalised ``User`` — verify it
    # matches the Project model's field name.
    projects = Project.objects.filter(User=current_user)
    try:
        prof = Profile.objects.get(prof_user=current_user)
    except ObjectDoesNotExist:
        return redirect('new_profile')
    return render(request, 'profile.html', {'profile': prof, 'projects': projects})
# @login_required(login_url='/accounts/login/')
def new_profile(request):
    """Create a profile for the current user from the submitted form;
    render the (possibly bound) form otherwise."""
    current_user = request.user
    if request.method != 'POST':
        return render(request, 'registration/new_profile.html', {"form": ProfileForm()})
    form = ProfileForm(request.POST, request.FILES)
    if not form.is_valid():
        # re-render with the bound form so validation errors show
        return render(request, 'registration/new_profile.html', {"form": form})
    profile = form.save(commit=False)
    profile.prof_user = current_user
    profile.profile_Id = request.user.id
    profile.save()
    return redirect('profile')
# @login_required(login_url='/accounts/login/')
def profile_edit(request):
    """Update the logged-in user's Profile.

    A valid POST saves the changes and redirects to the profile page; an
    invalid POST re-renders the bound form. Note: the GET branch renders an
    unbound form (no instance pre-fill), matching the original behaviour.
    """
    if request.method != 'POST':
        return render(request, 'registration/edit_profile.html', {'form': ProfileForm()})
    existing_profile = Profile.objects.get(prof_user=request.user)
    form = ProfileForm(request.POST, request.FILES, instance=existing_profile)
    if form.is_valid():
        form.save()
        return redirect('profile')
    return render(request, 'registration/edit_profile.html', {'form': form})
# @login_required(login_url='/accounts/login/')
def new_project(request):
    """Upload a new Project owned by the logged-in user.

    GET shows an empty form; a valid POST saves the project and redirects to
    the index page, while an invalid POST re-renders the bound form.
    """
    uploader = request.user
    if request.method != 'POST':
        return render(request, "new_project.html", {"form": ProjectForm()})
    form = ProjectForm(request.POST, request.FILES)
    if form.is_valid():
        project = form.save(commit=False)
        project.user = uploader
        project.save()
        return redirect('index')
    return render(request, "new_project.html", {"form": form})
# Project Serializer
class ProjectList(APIView):
    """List all projects (GET) or create a new one (POST)."""

    # BUG FIX: permission_classes was previously assigned as a local
    # variable inside post(), which DRF never sees; it must be a class
    # attribute to take effect.
    permission_classes = (IsAdminOrReadOnly,)

    def get(self, request, format=None):
        """Return every Project, serialized."""
        all_projects = Project.objects.all()
        serializers = ProjectSerializer(all_projects, many=True)
        return Response(serializers.data)

    def post(self, request, format=None):
        """Create a Project from the request payload; 400 on invalid data."""
        serializers = ProjectSerializer(data=request.data)
        if serializers.is_valid():
            serializers.save()
            return Response(serializers.data, status=status.HTTP_201_CREATED)
        return Response(serializers.errors, status=status.HTTP_400_BAD_REQUEST)
class ProjectDescription(APIView):
    """Retrieve, update or delete a single Project by primary key."""

    permission_classes = (IsAdminOrReadOnly,)

    def get_project(self, pk):
        """Return the Project with primary key *pk*, or raise Http404.

        BUG FIX: previously this *returned* the Http404 class instead of
        raising an instance, so missing projects fell through into the
        serializer instead of producing a 404 response.
        """
        try:
            return Project.objects.get(pk=pk)
        except Project.DoesNotExist:
            raise Http404

    def get(self, request, pk, format=None):
        """Return the serialized project."""
        project = self.get_project(pk)
        serializers = ProjectSerializer(project)
        return Response(serializers.data)

    def put(self, request, pk, format=None):
        """Replace the project with the request payload; 400 on invalid data."""
        project = self.get_project(pk)
        serializers = ProjectSerializer(project, request.data)
        if serializers.is_valid():
            serializers.save()
            return Response(serializers.data)
        return Response(serializers.errors,
                        status=status.HTTP_400_BAD_REQUEST)

    def delete(self, request, pk, format=None):
        """Delete the project and return 204."""
        project = self.get_project(pk)
        project.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
# Profile Serializer
class ProfileList(APIView):
    """List all profiles (GET) or create a new one (POST)."""

    # BUG FIX: permission_classes was previously a dead local variable
    # inside post(); DRF only honours the class attribute.
    permission_classes = (IsAdminOrReadOnly,)

    def get(self, request, format=None):
        """Return every Profile, serialized."""
        all_profiles = Profile.objects.all()
        serializers = ProfileSerializer(all_profiles, many=True)
        return Response(serializers.data)

    def post(self, request, format=None):
        """Create a Profile from the request payload; 400 on invalid data."""
        serializers = ProfileSerializer(data=request.data)
        if serializers.is_valid():
            serializers.save()
            return Response(serializers.data, status=status.HTTP_201_CREATED)
        return Response(serializers.errors, status=status.HTTP_400_BAD_REQUEST)
class ProfileDescription(APIView):
    """Retrieve, update or delete a single Profile by primary key."""

    permission_classes = (IsAdminOrReadOnly,)

    def get_profile(self, pk):
        """Return the Profile with primary key *pk*, or raise Http404.

        BUG FIX: previously this *returned* the Http404 class instead of
        raising, so a missing profile never produced a 404 response.
        """
        try:
            return Profile.objects.get(pk=pk)
        except Profile.DoesNotExist:
            raise Http404

    def get(self, request, pk, format=None):
        """Return the serialized profile."""
        profile = self.get_profile(pk)
        serializers = ProfileSerializer(profile)
        return Response(serializers.data)

    def put(self, request, pk, format=None):
        """Replace the profile with the request payload; 400 on invalid data."""
        profile = self.get_profile(pk)
        serializers = ProfileSerializer(profile, request.data)
        if serializers.is_valid():
            serializers.save()
            return Response(serializers.data)
        return Response(serializers.errors,
                        status=status.HTTP_400_BAD_REQUEST)

    def delete(self, request, pk, format=None):
        """Delete the profile and return 204."""
        profile = self.get_profile(pk)
        profile.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
# def review_list(request):
# latest_review_list = Review.objects.all()
# context = {'latest_review_list':latest_review_list}
# return render(request, 'review_list.html', context)
# def review_detail(request, review_id):
# review = get_object_or_404(Review, pk=review_id)
# return render(request, 'review_detail.html', {'review': review})
# def project_list(request):
# project_list = Project.objects.order_by('-title')
# context = {'project_list':project_list}
# return render(request, 'project_list.html', context)
def project_review(request, project_id):
    """Show a project's review page and record design/usability/content votes.

    Each submitted vote is folded into a running score per category: the raw
    vote on the first submission, then the mean of the old score and the new
    vote on every later one. The page shows the mean of the three scores.
    """
    try:
        single_project = Project.get_single_project(project_id)
        average_score = round(((single_project.design + single_project.usability + single_project.content) / 3), 2)
        if request.method == 'POST':
            vote_form = VoteForm(request.POST)
            if vote_form.is_valid():
                single_project.vote_submissions += 1
                if single_project.design == 0:
                    single_project.design = int(request.POST['design'])
                else:
                    single_project.design = (single_project.design + int(request.POST['design'])) / 2
                if single_project.usability == 0:
                    single_project.usability = int(request.POST['usability'])
                else:
                    single_project.usability = (single_project.usability + int(request.POST['usability'])) / 2
                if single_project.content == 0:
                    single_project.content = int(request.POST['content'])
                else:
                    # BUG FIX: this branch previously averaged in
                    # request.POST['usability'], corrupting the content score.
                    single_project.content = (single_project.content + int(request.POST['content'])) / 2
                single_project.save()
                return redirect('project_review', project_id)
        else:
            vote_form = VoteForm()
    except Exception:
        # NOTE(review): blanket except maps any failure to 404; consider
        # narrowing this to the model's DoesNotExist exception.
        raise Http404()
    return render(request, 'project_review.html', {"vote_form": vote_form, "single_project": single_project, "average_score": average_score})
def findDecision(obj):
    """Generated decision-tree classifier.

    Feature vector layout: obj[0]=Coupon, obj[1]=Education,
    obj[2]=Occupation, obj[3]=Restaurant20to50, obj[4]=Distance.

    Returns the string 'True' or 'False'. Sub-trees whose leaves all
    agreed in the generated tree are collapsed to a single return; every
    leaf outcome is unchanged.
    """
    if obj[1] <= 3:  # Education
        if obj[2] <= 20:  # Occupation
            if obj[3] <= 2.0:  # Restaurant20to50
                if obj[0] <= 3:  # Coupon
                    # Both Distance leaves of this sub-tree were 'True'.
                    return 'True'
                elif obj[0] > 3:
                    # Distance decides: near -> accept, far -> reject.
                    return 'True' if obj[4] <= 1 else 'False'
                else:
                    return 'False'
            else:
                return 'False'
        else:
            return 'True'
    else:
        return 'True'
|
from setuptools import setup
# Packaging metadata for the `mstream` command-line tool.
setup(name="mstream",
      version="0.2",
      description="Instantly stream movies/ tv episodes you want to watch",
      url="http://vaulstein.github.com",
      author="Vaulstein Rodrigues",
      author_email="vaulstein@gmail.com",
      license='MIT',
      packages=["mstream"],
      # Executable entry-point script installed onto the user's PATH.
      scripts=["bin/mstream"],
      # Runtime dependencies: HTML scraping and HTTP requests.
      install_requires=[
          'BeautifulSoup4',
          'requests'],
      # Must be unpacked on install (not importable from a zip).
      zip_safe=False)
import datetime
from app import query_db
from app.ORM.Comment import get_all_comments_by_post
from app.ORM.User import get_user_by_id
class Post:
    """A user's post (content + optional image) with its comment list."""

    def __init__(self, user, content, image, creation_time=None, id=0):
        # BUG FIX: the default used to be `datetime.datetime.now()` in the
        # signature, which Python evaluates once at import time -- every
        # defaulted Post shared the module-load timestamp. Use None as the
        # sentinel and take the timestamp per call instead.
        if creation_time is None:
            creation_time = datetime.datetime.now()
        self.id = id  # Never persisted
        self.user = user
        self.content = content
        self.image = image
        self.creation_time = creation_time
        self.comments = []

    def persist(self):
        """Insert this post into the Posts table.

        WARNING(review): values are interpolated straight into the SQL
        string -- SQL-injection risk if content/image can contain quotes.
        Switch to query_db's parameter binding if it supports placeholders.
        """
        query_db('INSERT INTO Posts (u_id, content, image, creation_time) VALUES({}, "{}", "{}", \'{}\');'.format(
            self.user.id,
            self.content,
            self.image,
            self.creation_time))

    def __eq__(self, other):
        # Posts are identified purely by class + database id.
        return self.__class__ == other.__class__ and self.id == other.id
def get_post_by_id(post_id):
    """Load a single Post (including its comments) by primary key.

    NOTE(review): post_id is formatted directly into the SQL string --
    injection risk if it can come from user input; prefer parameter
    binding if query_db supports it.
    """
    query = query_db(
        'SELECT * FROM Posts WHERE id == "{}";'.format(
            post_id), one=True)
    # Re-hydrate the author object, then the post itself, from the row.
    user = get_user_by_id(query["u_id"])
    post = Post(user, query["content"], query["image"], query["creation_time"], query["id"])
    post.comments = get_all_comments_by_post(post)
    return post
def get_all_posts_by_user(user):
    """Return the newest-first feed for *user*: posts by the user and by
    anyone linked to them in either direction of the Friends table.

    NOTE(review): user.id is formatted into the SQL string; safe only if
    ids are always integers -- prefer parameter binding if available.
    """
    query = query_db(
        'SELECT * FROM Posts AS p JOIN Users AS u ON u.id=p.u_id WHERE p.u_id IN (SELECT u_id FROM Friends WHERE f_id={0}) OR p.u_id IN (SELECT f_id FROM Friends WHERE u_id={0}) OR p.u_id={0} ORDER BY p.creation_time DESC;'.format(
            user.id))
    posts = []
    for e in query:
        # Each row carries both post and author columns; rebuild both.
        user = get_user_by_id(e["u_id"])
        post = Post(user, e["content"], e["image"], e["creation_time"], e["id"])
        post.comments = get_all_comments_by_post(post)
        posts.append(post)
    return posts
|
#!/usr/bin/env python
import libvirt
import socket
# Collected metrics, keyed by stat name.
return_data = dict()

conn = libvirt.openReadOnly()
try:
    domains = conn.listDomainsID()
    return_data['kvm_vms'] = len(domains)
    return_data['kvm_total_vcpus'] = conn.getCPUMap()[0]
    # Sum the vCPU allocation across all running domains.
    scheduled = 0
    for dom_id in domains:
        scheduled += conn.lookupByID(dom_id).maxVcpus()
    return_data['kvm_scheduled_vcpus'] = scheduled
    # Stable-ish numeric host identifier derived from the FQDN.
    return_data['kvm_host_id'] = abs(hash(socket.getfqdn()))
except Exception as exp:
    raise SystemExit('Plugin failure -- Reason: "%s"' % exp)
else:
    # Emit a single line in "kvm key=value,key=value" plugin format.
    fields = ','.join('%s=%s' % (key.replace(' ', '_'), value)
                      for key, value in return_data.items())
    print('kvm ' + fields)
finally:
    conn.close()
|
from typing import Union, Tuple
from hwt.interfaces.agents.handshaked import HandshakedAgent
from hwt.interfaces.std import Handshaked, HandshakeSync, VectSignal
from hwt.synthesizer.param import Param
from hwtLib.handshaked.intfBiDirectional import HandshakedBiDirectionalAgent
from ipCorePackager.constants import DIRECTION
from hwtSimApi.hdlSimulator import HdlSimulator
class IndexKeyHs(Handshaked):
    """
    Handshaked interface carrying an index and an optional key.

    .. hwt-autodoc::
    """
    def _config(self):
        # A KEY_WIDTH of 0 disables the key signal entirely (see _declr).
        self.INDEX_WIDTH = Param(4)
        self.KEY_WIDTH = Param(4)
    def _declr(self):
        # Reuse only the ready/valid pair from HandshakeSync; the data
        # signals are declared explicitly below.
        HandshakeSync._declr(self)
        if self.KEY_WIDTH:
            self.key = VectSignal(self.KEY_WIDTH)
        self.index = VectSignal(self.INDEX_WIDTH)
    def _initSimAgent(self, sim: HdlSimulator):
        self._ag = IndexKeyHsAgent(sim, self)
class IndexKeyHsAgent(HandshakedAgent):
    """Simulation agent for IndexKeyHs.

    Data items are (key, index) tuples when the key signal exists
    (KEY_WIDTH non-zero) and bare index values otherwise.
    """

    def get_data(self) -> Union[Tuple[int, int], int]:
        intf = self.intf
        if not intf.KEY_WIDTH:
            return intf.index.read()
        return (intf.key.read(), intf.index.read())

    def set_data(self, data: Union[Tuple[int, int], int]):
        intf = self.intf
        if intf.KEY_WIDTH:
            # None means "drive nothing"; propagate it to both signals.
            key, index = (None, None) if data is None else data
            intf.key.write(key)
        else:
            index = data
        intf.index.write(index)
class IndexKeyInHs(Handshaked):
    """
    Handshaked interface with an index in the master direction and an
    optional key signal driven in the opposite direction (masterDir=IN).

    .. hwt-autodoc::
    """
    def _config(self):
        # A KEY_WIDTH of 0 disables the key signal entirely (see _declr).
        self.INDEX_WIDTH = Param(4)
        self.KEY_WIDTH = Param(4)
    def _declr(self):
        HandshakeSync._declr(self)
        if self.KEY_WIDTH:
            # Key flows against the handshake direction (slave -> master).
            self.key = VectSignal(self.KEY_WIDTH, masterDir=DIRECTION.IN)
        self.index = VectSignal(self.INDEX_WIDTH)
    def _initSimAgent(self, sim: HdlSimulator):
        self._ag = IndexKeyInHsAgent(sim, self)
class IndexKeyInHsAgent(HandshakedBiDirectionalAgent):
    """Agent for IndexKeyInHs: drives/monitors `index` over the regular
    handshake while the `key` values travel the opposite way through the
    agent's dinData queue."""
    def onMonitorReady(self):
        "write din"
        i = self.intf
        if i.KEY_WIDTH:
            # Pop the next key value and drive it back toward the master.
            d = self.dinData.popleft()
            i.key.write(d)
    def onDriverWriteAck(self):
        "read din"
        i = self.intf
        if i.KEY_WIDTH:
            # Capture the key value returned by the other side.
            d = i.key.read()
            self.dinData.append(d)
    def get_data(self) -> Union[Tuple[int, int], int]:
        # Only the index travels in the normal handshake direction.
        return self.intf.index.read()
    def set_data(self, data: Union[Tuple[int, int], int]):
        self.intf.index.write(data)
|
#!/usr/bin/env python3
"""
Module to parse the vtr flow results.
"""
import sys
from pathlib import Path
import glob
from collections import OrderedDict, defaultdict
# pylint: disable=wrong-import-position
sys.path.insert(0, str(Path(__file__).resolve().parent.parent))
import vtr
from vtr import paths
# pylint: enable=wrong-import-position
def parse_file_and_update_results(filename, patterns, results):
    """
    Find filename, and then look through for the matching patterns,
    updating results.

    filename is treated as a glob pattern; zero matches is a no-op and
    more than one match raises vtr.InspectError. For each line of the
    matched file, the first capture group of each matching pattern is
    stored in results[pattern] (later lines overwrite earlier ones).
    """
    # We interpret the parse pattern's filename as a glob pattern
    filepaths = glob.glob(filename)

    if len(filepaths) > 1:
        raise vtr.InspectError(
            "File pattern '{}' is ambiguous ({} files matched)".format(filename, len(filepaths)),
            len(filepaths),
            filepaths,
        )

    if len(filepaths) == 1:
        # BUG FIX: `.exists` (missing parentheses) was a bound method and
        # therefore always truthy; the check never ran. Call it.
        assert Path(filepaths[0]).exists()

        with open(filepaths[0], "r") as file:
            for line in file:
                # Strip leading '#' characters so commented metric lines
                # still match. Guard against an empty string (possible for
                # a final line without a trailing newline).
                while line and line[0] == "#":
                    line = line[1:]

                for parse_pattern in patterns:
                    match = parse_pattern.regex().match(line)
                    if match and match.groups():
                        # Extract the first group value
                        results[parse_pattern] = match.groups()[0]
def parse_vtr_flow(arg_list):
    """
    parse vtr flow output

    arg_list: [parse_path, parse_config_file, *extra "key=value" params].
    Prints a tab-separated header row (extra-param keys, then pattern
    names) followed by a row of values, and returns 0.
    """
    parse_path = arg_list[0]
    parse_config_file = arg_list[1]
    extra_params = arg_list[2:]
    parse_config_file = vtr.util.verify_file(parse_config_file, "parse config")
    if parse_config_file is None:
        # Fall back to the stock benchmark parse configuration.
        parse_config_file = str(paths.vtr_benchmarks_parse_path)
    parse_patterns = vtr.load_parse_patterns(str(parse_config_file))
    results = OrderedDict()
    extra_params_parsed = OrderedDict()
    # Extra params are printed first as header columns.
    for param in extra_params:
        key, value = param.split("=", 1)
        extra_params_parsed[key] = value
        print(key, end="\t")
    for parse_pattern in parse_patterns.values():
        # Set defaults
        results[parse_pattern] = (
            parse_pattern.default_value() if parse_pattern.default_value() is not None else "-1"
        )
        # Print header row
        print(parse_pattern.name(), end="\t")
    print("")
    # Value row starts with the extra-param values, in insertion order.
    for key, value in extra_params_parsed.items():
        print(value, end="\t")
    # Group parse patterns by filename so that we only need to read each log file from disk once
    parse_patterns_by_filename = defaultdict(list)
    for parse_pattern in parse_patterns.values():
        parse_patterns_by_filename[parse_pattern.filename()].append(parse_pattern)
    # Process each pattern
    for filename, patterns in parse_patterns_by_filename.items():
        parse_file_and_update_results(str(Path(parse_path) / filename), patterns, results)
    # Print results
    for parse_pattern in parse_patterns.values():
        print(results[parse_pattern], end="\t")
    print("")
    return 0
if __name__ == "__main__":
parse_vtr_flow(sys.argv[1:])
|
from page_page import Page
from selenium.webdriver.common.by import By
class InternalPage(Page):
    """Page object for the authenticated (internal) area of the site."""
    @property
    def logout_button(self):
        # Located by its visible link text.
        return self.driver.find_element_by_link_text("Log out")
    @property
    def is_this_page(self):
        # NOTE(review): page identity is keyed off the presence of the
        # "loginform" element -- confirm against the template that this
        # id really appears on the internal page.
        return self.is_element_visible((By.ID, "loginform"))
|
from rwp.environment import *
from rwp.sspade import *
from rwp.vis import *
from rwp.petool import *
logging.basicConfig(level=logging.DEBUG)

# --- Environment setup: dry ground, 300 m ceiling, 1 km range ---------------
environment = Troposphere()
environment.ground_material = VeryDryGround()
environment.z_max = 300
max_range = 1000
#environment.knife_edges = [KnifeEdge(range=800, height=125)]
#profile1d = interp1d(x=[0, 100, 150, 300], y=[0, 32, 10, 45], fill_value="extrapolate")
# Height-dependent modified refractivity profile (duct between 5-100 m).
profile1d = interp1d(x=[0, 5, 70, 100, 300], y=[0, 0, -30, 0, 0], fill_value="extrapolate")
environment.M_profile = lambda x, z: profile1d(z)

# 100 MHz Gaussian antenna at 100 m height, vertical polarization.
antenna = GaussAntenna(freq_hz=100e6, height=100, beam_width=15, eval_angle=30, polarz='V')

# Pade propagator with transparent boundary condition.
propagator = TroposphericRadioWaveSSPadePropagator(antenna=antenna, env=environment, max_range_m=max_range,
                                                   comp_params=HelmholtzPropagatorComputationalParams(
                                                       max_propagation_angle=50
                                                   ))
#environment.ground_material = PerfectlyElectricConducting()
# Same propagator but with a local (staircase-terrain) boundary condition.
propagator_local_bc = TroposphericRadioWaveSSPadePropagator(antenna=antenna, env=environment, max_range_m=max_range,
                                                   comp_params=HelmholtzPropagatorComputationalParams(
                                                       max_propagation_angle=50,
                                                       terrain_method=TerrainMethod.staircase
                                                   ))
field = propagator.calculate()
field_local_bc = propagator_local_bc.calculate()

# Reference run with the PETOOL split-step Fourier implementation.
petool_task = PETOOLPropagationTask(antenna=antenna, env=environment, two_way=False, max_range_m=max_range,
                                    dx_wl=propagator.comp_params.dx_wl, n_dx_out=1, dz_wl=0.2, n_dz_out=5)
petool_field = petool_task.calculate()

# --- Visualisation: horizontal cut at 125 m, then 2D maps per method --------
vis = FieldVisualiser(field, trans_func=lambda v: 10 * cm.log10(1e-16 + abs(v)), label='Pade + Transparent BC', x_mult=1E-3)
vis_local_bc = FieldVisualiser(field_local_bc, trans_func=lambda v: 10 * cm.log10(1e-16 + abs(v)), label='Pade + local bc', x_mult=1E-3)
petool_vis = FieldVisualiser(petool_field, trans_func=lambda x: x, label='SSF (PETOOL)', x_mult=1E-3)

plt = vis.plot_hor(125, vis_local_bc, petool_vis)
plt.xlabel('Range (km)')
plt.ylabel('10log|u| (dB)')
plt.tight_layout()
plt.show()

plt = vis.plot2d(min=-40, max=0)
plt.title('10log|u|')
plt.xlabel('Range (km)')
plt.ylabel('Height (m)')
plt.tight_layout()
plt.show()

plt = vis_local_bc.plot2d(min=-40, max=0)
plt.title('10log|u|')
plt.xlabel('Range (km)')
plt.ylabel('Height (m)')
plt.tight_layout()
plt.show()

plt = petool_vis.plot2d(min=-40, max=0)
plt.title('10log|u|')
plt.xlabel('Range (km)')
plt.ylabel('Height (m)')
plt.tight_layout()
plt.show()

# coefs = propagator.propagator.lower_bc.coefs
# abs_coefs = np.array([np.linalg.norm(a) for a in coefs])
# plt.plot(np.log10(abs_coefs))
# plt.show()
# -*- coding: utf-8 -*-
'''
Create an XDG function to get the config dir
'''
import os
def xdg_config_dir(config_dir=None):
    '''
    Check xdg locations for config files

    Returns $XDG_CONFIG_HOME/salt (XDG_CONFIG_HOME defaulting to
    ~/.config) when that directory exists; otherwise falls back to a
    dotted home-directory path built from *config_dir*.
    '''
    xdg_base = os.getenv('XDG_CONFIG_HOME', os.path.expanduser('~/.config'))
    salt_dir = os.path.join(xdg_base, 'salt')
    if os.path.isdir(salt_dir):
        return salt_dir
    fallback = '~/.' if config_dir is None else os.path.join('~/.', config_dir)
    return os.path.expanduser(fallback)
|
# Copyright (C) 2020-2021 by Vd.
# This file is part of EnvReader, the modern environment variables processor.
# EnvReader is released under the MIT License (see LICENSE).
class EnvError(ValueError):
    """Base error raised while reading environment variables.

    Remembers the name of the field that failed, exposed via `field`.
    """

    def __init__(self, field: str, msg: str):
        super().__init__(msg)
        self.__field = field

    @property
    def field(self) -> str:
        """Name of the offending environment field."""
        return self.__field
class EnvMissingError(EnvError):
    """Raised when a required environment variable is absent."""
class EnvTransformError(EnvError):
    """Raised when an environment value cannot be transformed to its
    target type."""
|
# Libraries
import numpy as np
import pickle
from skimage.color import rgb2gray
# Other python files
import constants as k
from evaluation_metrics import *
import experiments as exp
import dataset as ds
from NetworkTF import *
import GIST as cust_gist # You have to install FFTW and lear-gist first before you can import
# Documentation here: https://github.com/tuttieee/lear-gist-python
# Converts numpy images (32x32x3) into 512-D GIST vector
# Converts images into grayscale first
def convertToGIST(images):
    """Convert RGB numpy images (32x32x3) into 512-D GIST descriptors.

    Images are converted to grayscale before feature extraction.
    """
    gist_params = {
        "orientationsPerScale": np.array([8, 8, 8, 8]),
        "numberBlocks": [4, 4],
        "fc_prefilt": 4,
        "boundaryExtension": 10,
    }
    extractor = cust_gist.GIST(gist_params)
    descriptors = np.array(
        [extractor._gist_extract(rgb2gray(image)) for image in images],
        copy=False)
    return descriptors
def evaluateCIFAR():
    """Build (or load a cached) per-class GIST feature dictionary for
    CIFAR, generate training inputs, and run the supervised experiment."""
    # Preprocess CIFAR data into standardized form (classDict)
    try:
        # NOTE(review): pickle.load on a local cache file -- never point
        # this at untrusted data.
        classDict = pickle.load(open("cifar_classdict.pkl","rb"))
        print("Classdict dump found, loading previous")
    except FileNotFoundError:
        print("First breaking classes since no existing dump was found")
        images, labels = ds.load_ds_raw('cifar_numpy.npz')
        gistData = np.array(convertToGIST(images), copy=False)
        classDict = ds.break_classes(gistData, labels)
        # Cache the expensive GIST extraction for subsequent runs.
        pickle.dump(classDict, open("cifar_classdict.pkl","wb"))
    # Generate inputs (supervised is a superset of unsupervised + +/- pairs)
    print("Generating Inputs")
    inputs = ds.generateInputs(classDict, "supervised")
    # Run experiments
    # NOTE(review): the message says "unsupervised" but the experiment
    # mode below is "supervised" -- likely stale text.
    print("Running unsupervised experiment")
    exp.runExperiment("cifar_model", "supervised", inputs)
if __name__ == '__main__':
    # Entry point: run the full CIFAR GIST evaluation pipeline.
    evaluateCIFAR()
# -*- coding: utf-8 -*-
#
# Copyright 2018 Amir Hadifar. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import matplotlib
import matplotlib.pyplot as plt
from tensorflow import keras
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
matplotlib.pyplot.ioff()
if __name__ == '__main__':
    # Load CIFAR-10 and one-hot encode the labels.
    (x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
    img_shape = x_train[0].shape
    print('Cifar10 images size', img_shape)
    y_train = keras.utils.to_categorical(y_train, num_classes=10)
    y_test = keras.utils.to_categorical(y_test, num_classes=10)
    # Augmentation for training; test data only gets the normalization.
    train_gen = ImageDataGenerator(featurewise_std_normalization=True,
                                   rotation_range=20,
                                   width_shift_range=0.2,
                                   height_shift_range=0.2,
                                   horizontal_flip=True)
    test_gen = ImageDataGenerator(featurewise_std_normalization=True)
    # featurewise normalization requires fitting on the data first.
    train_gen.fit(x_train)
    test_gen.fit(x_test)
    # Simple 3-stage conv net: (Conv -> MaxPool) x3, then dense head.
    img_input = keras.layers.Input(shape=img_shape)
    conv1 = keras.layers.Conv2D(16, 3, activation='relu', padding='same')(img_input)
    pol1 = keras.layers.MaxPooling2D(2)(conv1)
    conv2 = keras.layers.Conv2D(32, 3, activation='relu', padding='same')(pol1)
    pol2 = keras.layers.MaxPooling2D(2)(conv2)
    conv3 = keras.layers.Conv2D(64, 3, activation='relu', padding='same')(pol2)
    pol3 = keras.layers.MaxPooling2D(2)(conv3)
    flatten = keras.layers.Flatten()(pol3)
    dens1 = keras.layers.Dense(512, activation='relu')(flatten)
    dens2 = keras.layers.Dense(128, activation='relu')(dens1)
    drop1 = keras.layers.Dropout(0.2)(dens2)
    output = keras.layers.Dense(10, activation='softmax')(drop1)
    model = keras.Model(img_input, output)
    print(model.summary())
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    # NOTE(review): fit_generator is deprecated in newer Keras, and
    # steps_per_epoch is a float here (len/64) -- confirm the installed
    # version accepts both.
    model.fit_generator(train_gen.flow(x_train, y_train, batch_size=64),
                        steps_per_epoch=len(x_train) / 64, epochs=30, verbose=2)
    # NOTE(review): evaluation uses raw x_test, not test_gen's
    # normalized flow -- presumably intentional, but verify.
    print(model.evaluate(x_test, y_test))
# visualize layers
# successive_outputs = [layer.output for layer in model.layers[1:]]
# visualization_model = Model(img_input, successive_outputs)
#
# x = x_test[1]
# x = x.reshape((1,) + x.shape) # Numpy array with shape (1, 150, 150, 3)
#
# # Rescale by 1/255
# # x /= 255
#
# # Let's run our image through our network, thus obtaining all
# # intermediate representations for this image.
# successive_feature_maps = visualization_model.predict(x)
#
# # These are the names of the layers, so can have them as part of our plot
# layer_names = [layer.name for layer in model.layers[1:]]
#
# # Now let's display our representations
# for layer_name, feature_map in zip(layer_names, successive_feature_maps):
# if len(feature_map.shape) == 4:
# # Just do this for the conv / maxpool layers, not the fully-connected layers
# n_features = feature_map.shape[-1] # number of features in feature map
# # The feature map has shape (1, size, size, n_features)
# size = feature_map.shape[1]
# # We will tile our images in this matrix
# display_grid = np.zeros((size, size * n_features))
# for i in range(n_features):
# # Postprocess the feature to make it visually palatable
# x = feature_map[0, :, :, i]
# x -= x.mean()
# x /= x.std()
# x *= 64
# x += 128
# x = np.clip(x, 0, 255).astype('uint8')
# # We'll tile each filter into this big horizontal grid
# display_grid[:, i * size: (i + 1) * size] = x
# # Display the grid
# scale = 20. / n_features
# plt.figure(figsize=(scale * n_features, scale))
# plt.title(layer_name)
# plt.grid(False)
# plt.imshow(display_grid, aspect='auto', cmap='viridis')
#
# plt.show()
|
#!/usr/bin/env python3
# Author: Christopher Blöcker, Timotheus Kampik, Tobias Sundqvist, Marcus?
from __future__ import print_function
import logging
import numpy as np
import sys
import termios
from cflib import crazyflie, crtp
from controller import ControllerThread
from multiprocessing import Process, Queue
from server import runServer, runPathPlanner
# Set a channel - if set to None, the first available crazyflie is used
URI = 'radio://0/110/2M'
def read_input(file=sys.stdin):
    """Registers keystrokes and yield these every time one of the
    *valid_characters* are pressed.

    Generator: puts the terminal into non-canonical, no-echo mode so
    single keypresses are delivered immediately, and restores the
    original mode when the generator is closed or interrupted.
    """
    # Save the current terminal attributes so they can be restored.
    old_attrs = termios.tcgetattr(file.fileno())
    new_attrs = old_attrs[:]
    # Clear ECHO and ICANON in the local-modes field (index 3).
    new_attrs[3] = new_attrs[3] & ~(termios.ECHO | termios.ICANON)
    try:
        termios.tcsetattr(file.fileno(), termios.TCSADRAIN, new_attrs)
        while True:
            try:
                yield sys.stdin.read(1)
            except (KeyboardInterrupt, EOFError):
                break
    finally:
        # Always restore the original terminal mode on exit.
        termios.tcsetattr(file.fileno(), termios.TCSADRAIN, old_attrs)
def handle_keyboard_input(control, server):
    """Dispatch single-keystroke commands to the controller until 'Q'.

    Reads characters from read_input() (raw terminal mode) and adjusts
    position/yaw references, thrust, and the motor enable state. 'Q'
    terminates the server process and exits the loop.
    """
    pos_step = 0.1  # [m]
    yaw_step = 5  # [deg]
    for ch in read_input():
        if ch == 'h':
            print('Key map:')
            print('>: Increase thrust (non-control mode)')
            print('<: Decrease thrust (non-control mode)')
            print('Q: quit program')
            print('e: Enable motors')
            print('q: Disable motors')
            print('w: Increase x-reference by ', pos_step, 'm.')
            print('s: Decrease x-reference by ', pos_step, 'm.')
            print('a: Increase y-reference by ', pos_step, 'm.')
            print('d: Decrease y-reference by ', pos_step, 'm.')
            print('i: Increase z-reference by ', pos_step, 'm.')
            print('k: Decrease z-reference by ', pos_step, 'm.')
            # BUG FIX: yaw step was reported in 'm.' -- it is degrees.
            print('j: Increase yaw-reference by ', yaw_step, 'deg.')
            print('l: Decrease yaw-reference by ', yaw_step, 'deg.')
            print('7: Toggle debug logging')
        elif ch == '>':
            control.increase_thrust()
            print('Increased thrust to', control.thrust_r)
        elif ch == '<':
            control.decrease_thrust()
            print('Decreased thrust to', control.thrust_r)
        elif ch == 'w':
            control.pos_ref[0] += pos_step
            print('Reference position changed to :', control.pos_ref)
        elif ch == 's':
            control.pos_ref[0] -= pos_step
            print('Reference position changed to :', control.pos_ref)
        elif ch == 'a':
            control.pos_ref[1] += pos_step
            print('Reference position changed to :', control.pos_ref)
        elif ch == 'd':
            control.pos_ref[1] -= pos_step
            print('Reference position changed to :', control.pos_ref)
        elif ch == 'i':
            control.pos_ref[2] += pos_step
            print('Reference position changed to :', control.pos_ref)
        elif ch == 'k':
            control.pos_ref[2] -= pos_step
            print('Reference position changed to :', control.pos_ref)
        elif ch == 'j':
            control.yaw_ref += np.radians(yaw_step)
            print('Yaw reference changed to :',
                  np.degrees(control.yaw_ref), 'deg.')
        elif ch == 'l':
            control.yaw_ref -= np.radians(yaw_step)
            print('Yaw reference changed to :',
                  np.degrees(control.yaw_ref), 'deg.')
        elif ch == ' ':
            # Space: drop the z reference to the floor.
            control.pos_ref[2] = 0.0
            print('Reference position changed to :', control.pos_ref)
        elif ch == 'e':
            control.enable()
        elif ch == 'q':
            if not control.enabled:
                print('Uppercase Q quits the program')
            control.disable()
        elif ch == 'Q':
            server.terminate()
            control.disable()
            print('Bye!')
            break
        elif ch == '7':
            control.toggle_debug()
        else:
            print('Unhandled key', ch, 'was pressed')
if __name__ == "__main__":
logging.basicConfig()
crtp.init_drivers(enable_debug_driver = False)
# the command queue for the crazyflie
crazyflieCommandQueue = Queue()
# set up the crazyflie
cf = crazyflie.Crazyflie(rw_cache = './cache')
control = ControllerThread(cf, crazyflieCommandQueue)
control.start()
# start the web interface to the crazyflie
server = Process(target = runServer, args = ("0.0.0.0", 8000, crazyflieCommandQueue))
server.start()
# start the path planning server
pathPlanner = Process(target = runPathPlanner, args = ("0.0.0.0", 8001, crazyflieCommandQueue))
pathPlanner.start()
# connect to the crazyflie
if URI is None:
print('Scanning for Crazyflies...')
available = crtp.scan_interfaces()
if available:
print('Found Crazyflies:')
for i in available:
print('-', i[0])
URI = available[0][0]
else:
print('No Crazyflies found!')
sys.exit(1)
print('Connecting to', URI)
cf.open_link(URI)
handle_keyboard_input(control, server)
cf.close_link()
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Megatron Module"""
import torch
from torch.autograd import Variable
from torch.nn.parameter import Parameter
from nemo.utils import logging
try:
from apex.transformer import parallel_state, tensor_parallel
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
_FLOAT_TYPES = (torch.FloatTensor, torch.cuda.FloatTensor)
_HALF_TYPES = (torch.HalfTensor, torch.cuda.HalfTensor)
_BF16_TYPES = (torch.BFloat16Tensor, torch.cuda.BFloat16Tensor)
def param_is_not_shared(param):
    """Return True unless *param* carries a truthy ``shared`` flag."""
    return not getattr(param, 'shared', False)
class MegatronModule(torch.nn.Module):
    """Megatron specific extensions of torch Module with support
    for pipelining."""

    def __init__(self, share_word_embeddings=True):
        # Requires apex.transformer (parallel_state / tensor_parallel).
        if not HAVE_APEX:
            raise ImportError(
                "Apex was not found. Please see the NeMo README for installation instructions: https://github.com/NVIDIA/NeMo#megatron-gpt."
            )
        super(MegatronModule, self).__init__()
        self.share_word_embeddings = share_word_embeddings

    def word_embeddings_weight(self):
        """Return the word-embedding weight owned by this pipeline stage.

        First stage: the language model's embedding table. Last stage:
        the duplicated head copy (only valid when share_word_embeddings).
        Any other stage raises.
        """
        if parallel_state.is_pipeline_first_stage(ignore_virtual=True):
            return self.language_model.embedding.word_embeddings.weight
        if parallel_state.is_pipeline_last_stage(ignore_virtual=True):
            if not self.share_word_embeddings:
                raise Exception(
                    'word_embeddings_weight() called for last ' 'stage, but share_word_embeddings is false'
                )
            return self.word_embeddings.weight
        raise Exception('word_embeddings_weight() should be ' 'called for first and last stage only')

    def initialize_word_embeddings(self, init_method, vocab_size, hidden_size):
        """Create the last-stage copy of the word embeddings used to tie
        the input embedding and output head across pipeline stages."""
        if not self.share_word_embeddings:
            raise Exception('initialize_word_embeddings() was called but ' 'share_word_embeddings is false')

        # This function just initializes the word embeddings in the final stage
        # when we are using pipeline parallelism. If we aren't using pipeline
        # parallelism there is nothing to do.
        if parallel_state.get_pipeline_model_parallel_world_size() == 1:
            return

        # Parameters are shared between the word embeddings layer, and the
        # heads at the end of the model. In a pipelined setup with more than
        # one stage, the initial embedding layer and the head are on different
        # workers, so we do the following:
        # 1. Create a second copy of word_embeddings on the last stage, with
        #    initial parameters of 0.0.
        # 2. Do an all-reduce between the first and last stage to ensure that
        #    the two copies of word_embeddings start off with the same
        #    parameter values.
        # 3. In the training loop, before an all-reduce between the grads of
        #    the two word_embeddings layers to ensure that every applied weight
        #    update is the same on both stages.
        if parallel_state.is_pipeline_last_stage():
            assert not parallel_state.is_pipeline_first_stage()
            self._word_embeddings_for_head_key = 'word_embeddings_for_head'
            # set word_embeddings weights to 0 here, then copy first
            # stage's weights using all_reduce below.
            self.word_embeddings = tensor_parallel.VocabParallelEmbedding(
                vocab_size, hidden_size, init_method=init_method
            )
            self.word_embeddings.weight.data.fill_(0)
            self.word_embeddings.weight.shared = True

    def sync_initial_word_embeddings(self):
        """All-reduce the first/last-stage embedding copies so they start
        from identical values (step 2 of the scheme described above)."""
        if torch.distributed.is_initialized():
            if parallel_state.is_pipeline_first_stage() or parallel_state.is_pipeline_last_stage():
                torch.distributed.all_reduce(
                    self.word_embeddings_weight().data, group=parallel_state.get_embedding_group()
                )
        else:
            logging.warning(
                "WARNING! Distributed processes aren't initialized, so "
                "word embeddings in the last layer are not synchronized. "
                "If you are just manipulating a model this is fine, but "
                "this needs to be handled manually. If you are training "
                "something is definitely wrong."
            )
def conversion_helper(val, conversion):
    """Apply conversion to val. Recursively apply conversion if `val`
    is a nested tuple/list structure.

    Lists stay lists and tuples stay tuples in the returned structure.
    """
    # DOC FIX: the original docstring had a stray '#' embedded in it.
    if not isinstance(val, (tuple, list)):
        return conversion(val)
    rtn = [conversion_helper(v, conversion) for v in val]
    if isinstance(val, tuple):
        rtn = tuple(rtn)
    return rtn
def fp32_to_float16(val, float16_converter):
    """Convert fp32 `val` to fp16/bf16"""

    def half_conversion(item):
        # Unwrap Parameter/Variable to inspect the underlying tensor type,
        # but convert the original wrapper object.
        inner = item.data if isinstance(item, (Parameter, Variable)) else item
        if isinstance(inner, _FLOAT_TYPES):
            item = float16_converter(item)
        return item

    return conversion_helper(val, half_conversion)
def float16_to_fp32(val):
    """Convert fp16/bf16 `val` to fp32"""

    def float_conversion(item):
        # Unwrap Parameter/Variable to inspect the underlying tensor type,
        # but convert the original wrapper object.
        inner = item.data if isinstance(item, (Parameter, Variable)) else item
        if isinstance(inner, (_BF16_TYPES, _HALF_TYPES)):
            item = item.float()
        return item

    return conversion_helper(val, float_conversion)
class Float16Module(MegatronModule):
    """Wrapper that runs the inner module in fp16 or bf16, converting
    activations to half precision on entry to the first pipeline stage
    and back to fp32 on exit from the last."""

    def __init__(self, module, precision):
        # precision: 16 for fp16, the string 'bf16' for bfloat16.
        if not HAVE_APEX:
            raise ImportError(
                "Apex was not found. Please see the NeMo README for installation instructions: https://github.com/NVIDIA/NeMo#megatron-gpt."
            )
        super().__init__()
        self.precision = precision

        if precision == 16:
            self.add_module('module', module.half())

            def float16_converter(val):
                return val.half()

        elif precision == 'bf16':
            self.add_module('module', module.bfloat16())

            def float16_converter(val):
                return val.bfloat16()

        else:
            raise Exception(
                f'precision {precision} is not supported. Float16Module (megatron_amp_O2) supports '
                'only fp16 and bf16.'
            )

        self.float16_converter = float16_converter

    def set_input_tensor(self, input_tensor):
        # Delegate to the wrapped module (pipeline-parallel hook).
        return self.module.set_input_tensor(input_tensor)

    def forward(self, *inputs, **kwargs):
        # Downcast only on the first stage; later stages receive
        # already-converted activations from the previous stage.
        if parallel_state.is_pipeline_first_stage():
            inputs = fp32_to_float16(inputs, self.float16_converter)
        outputs = self.module(*inputs, **kwargs)
        # Upcast back to fp32 only when leaving the last stage.
        if parallel_state.is_pipeline_last_stage():
            outputs = float16_to_fp32(outputs)
        return outputs

    def state_dict(self, destination=None, prefix='', keep_vars=False):
        # Checkpoints use the wrapped module's (unprefixed) state dict.
        return self.module.state_dict(destination, prefix, keep_vars)

    def state_dict_for_save_checkpoint(self, destination=None, prefix='', keep_vars=False):
        return self.module.state_dict_for_save_checkpoint(destination, prefix, keep_vars)

    def word_embeddings_weight(self):
        """Delegate embedding-weight lookup to the wrapped module's
        attributes, mirroring MegatronModule.word_embeddings_weight."""
        if parallel_state.is_pipeline_first_stage(ignore_virtual=True):
            return self.module.language_model.embedding.word_embeddings.weight
        if parallel_state.is_pipeline_last_stage(ignore_virtual=True):
            if not self.share_word_embeddings:
                raise Exception(
                    'word_embeddings_weight() called for last ' 'stage, but share_word_embeddings is false'
                )
            return self.module.word_embeddings.weight
        raise Exception('word_embeddings_weight() should be ' 'called for first and last stage only')
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sentencepiece as spm
from ..albert.tokenizer import AlbertEnglishTokenizer
__all__ = ['ReformerTokenizer']
class ReformerTokenizer(AlbertEnglishTokenizer):
    """
    Constructs a Reformer tokenizer based on SentencePiece.
    This tokenizer inherits from :class:`~paddlenlp.transformers.tokenizer_utils.PretrainedTokenizer`
    which contains most of the main methods. For more information regarding those methods,
    please refer to this superclass.
    Args:
        sentencepiece_model_file (str):
            The vocabulary file (ends with '.spm') required to instantiate
            a `SentencePiece <https://github.com/google/sentencepiece>`__ tokenizer.
        do_lower_case (bool):
            Whether or not to lowercase the input when tokenizing. Defaults to `False`.
        remove_space (bool):
            Whether or not to remove space when tokenizing. Defaults to `True`.
        keep_accents (bool):
            Whether or not to keep accents when tokenizing. Defaults to `False`.
        eos_token (str):
            A special token representing the *eos (end-of-sentence)* token.
            Defaults to "</s>".
        unk_token (str):
            A special token representing the *unknown (out-of-vocabulary)* token.
            An unknown token is set to be `unk_token` in order to be converted to an ID.
            Defaults to "<unk>".
        pad_token (str):
            A special token used to make arrays of tokens the same size for batching purposes.
            Defaults to "<unk>".
    """
    # Name of the local resource file expected next to a saved tokenizer.
    resource_files_names = {
        "sentencepiece_model_file": "spiece.model",
    }
    # Download URLs for the pretrained SentencePiece models.
    pretrained_resource_files_map = {
        "sentencepiece_model_file": {
            "reformer-crime-and-punishment":
            "http://paddlenlp.bj.bcebos.com/models/transformers/reformer/reformer-crime-and-punishment/spiece.model",
        },
    }
    # Default constructor kwargs per pretrained model name.
    pretrained_init_configuration = {
        "reformer-crime-and-punishment": {
            "do_lower_case": False
        },
    }
    def __init__(self,
                 sentencepiece_model_file,
                 do_lower_case=False,
                 remove_space=True,
                 keep_accents=False,
                 eos_token="</s>",
                 unk_token="<unk>",
                 pad_token="<unk>",
                 **kwargs):
        """Load the SentencePiece model and record the preprocessing flags.

        NOTE(review): does not call ``super().__init__`` — presumably the
        paddlenlp tokenizer base wires special tokens via ``**kwargs``
        elsewhere; confirm against AlbertEnglishTokenizer.
        """
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.sentencepiece_model_file = sentencepiece_model_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(sentencepiece_model_file)
    def __call__(self,
                 text,
                 text_pair=None,
                 max_seq_len=None,
                 stride=0,
                 is_split_into_words=False,
                 pad_to_max_seq_len=False,
                 truncation_strategy="longest_first",
                 return_position_ids=False,
                 return_token_type_ids=False,
                 return_attention_mask=True,
                 return_length=False,
                 return_overflowing_tokens=False,
                 return_special_tokens_mask=False):
        """Tokenize/encode `text` (and optional `text_pair`).

        Thin wrapper over the superclass ``__call__``; it only changes the
        defaults (``return_token_type_ids=False``, ``return_attention_mask=True``).
        Arguments are forwarded positionally, so their order must match the
        superclass signature exactly.
        """
        return super(ReformerTokenizer, self).__call__(
            text, text_pair, max_seq_len, stride, is_split_into_words,
            pad_to_max_seq_len, truncation_strategy, return_position_ids,
            return_token_type_ids, return_attention_mask, return_length,
            return_overflowing_tokens, return_special_tokens_mask)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """
        Build model inputs from a sequence or a pair of sequence.
        A Reformer sequence has the following format:
        - single sequence: ``X``
        - pair of sequences: ``A B ``
        (Reformer adds no special tokens; pairs are simply concatenated.)
        Args:
            token_ids_0 (List[int]):
                List of IDs to which the special tokens will be added.
            token_ids_1 (List[int], optional):
                Optional second list of IDs for sequence pairs. Defaults to None.
        Returns:
            List[int]: List of input_id with the appropriate special tokens.
        """
        if token_ids_1 is None:
            return token_ids_0
        return token_ids_0 + token_ids_1
    def create_token_type_ids_from_sequences(self,
                                             token_ids_0,
                                             token_ids_1=None):
        """
        Create a token-type mask from the two sequences: 0 for the first
        sequence, 1 for the second.
        If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
        Args:
            token_ids_0 (List[int]):
                List of IDs.
            token_ids_1 (List[int], optional):
                Optional second list of IDs for sequence pairs.
        Returns:
            List[int]: List of token_type_id according to the given sequence(s).
        """
        if token_ids_1 is None:
            return len(token_ids_0) * [0]
        return len(token_ids_0) * [0] + len(token_ids_1) * [1]
|
"""
https://leetcode-cn.com/explore/interview/card/top-interview-questions-easy/7/trees/47/
题目:二叉树的最大深度
给定一个二叉树,找出其最大深度。
二叉树的深度为根节点到最远叶子节点的最长路径上的节点数。
说明: 叶子节点是指没有子节点的节点。
示例:
给定二叉树 [3,9,20,null,null,15,7],
3
/ \
9 20
/ \
15 7
返回它的最大深度 3 。
@author Niefy
@date 2018-09-21
"""
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class MaxDepth:
    """LeetCode: maximum depth of a binary tree (count of nodes on the
    longest root-to-leaf path)."""
    def maxDepth(self, root):  # depth-first traversal
        """
        :type root: TreeNode (any object with .left/.right attributes)
        :rtype: int
        """
        # `is None` instead of `== None`; the recursion already covers the
        # leaf case (max(0, 0) + 1 == 1), so the explicit leaf branch of the
        # original was redundant and has been removed.
        if root is None:
            return 0
        return max(self.maxDepth(root.left), self.maxDepth(root.right)) + 1
#!/usr/bin/env python3
#### PYTHON IMPORTS ################################################################################
#### PACKAGE IMPORTS ###############################################################################
from src.helpers import canonicalize
#### GLOBALS #######################################################################################
API_TOKEN_PATH = canonicalize("github_api_token.txt")
#### CLASSES #######################################################################################
class EmptyAPITokenError(Exception):
    """
    Exception raised by getAPIToken when API_TOKEN_PATH is an empty file.
    """
    # No extra state needed; the message is supplied at raise time.
    pass
#### FUNCTIONS #####################################################################################
def getAPIToken(filepath=API_TOKEN_PATH):
    """
    Read the secret GitHub API token from 'github_api_token.txt'.
    Only the first line of the file is used.
    RETURN:
        api_token (str) -- GitHub API token
    RAISES:
        EmptyAPITokenError -- if the first line of the file is empty
    """
    with open(filepath, "r") as token_file:
        token = token_file.readline().strip("\n")
    if not token:
        raise EmptyAPITokenError("GitHub API token cannot be empty!")
    return token
#### MAIN ##########################################################################################
|
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 5 11:25:29 2019
@author: aesher9o1
"""
import smtplib
import pandas as pd
# Recipient list; one row per new member.
database = pd.read_excel('test.xlsx')

# Gmail credentials (placeholders — fill in before running).
gmail_sender = 'your_email_here'
gmail_passwd = 'your_password_here'

# Connect and authenticate ONCE. The original opened a brand-new SMTP
# connection and logged in for every single row, which is slow and can
# trip Gmail's rate limiting.
server = smtplib.SMTP('smtp.gmail.com', 587)
server.ehlo()
server.starttls()
server.login(gmail_sender, gmail_passwd)

for i, j in database.iterrows():
    # email
    # assumes the recipient address is in the third column — TODO confirm
    TO = str(j[2])
    SUBJECT = 'Welcome to IAESTE family'
    TEXT = 'Dear '+ str(j["Name"])+',\n\nWe welcome you aboard to the IAESTE family and hope your participation proves to be beneficial. We will be having our first General Body Meeting (GBM) soon.\n\nThis email is a final confirmation of your IAESTE membership.\nYour IAESTE Number is:'+ str(j["IAESTE No."])+'\n\nPlease make a note of it as the number will be the reference ID with respect to IAESTE henceforth.\n\nHappy Interning!! \n\nRegards,\n\nSarthak Sarbahi \nHead Administration IAESTE LC MUJ \n+91 - 9619937704'
    BODY = '\r\n'.join(['To: %s' % TO,
                        'From: %s' % gmail_sender,
                        'Subject: %s' % SUBJECT,
                        '', TEXT])
    try:
        server.sendmail(gmail_sender, [TO], BODY)
        print('email sent')
    except smtplib.SMTPException:
        # Narrowed from a bare `except:` which also swallowed KeyboardInterrupt
        # and typos; keep the best-effort per-recipient behavior.
        print('error sending mail')

server.quit()
|
# %% [markdown]
# # Notebook for converting the grocery upc spreadsheet into a database migration
# %%
import openpyxl
# Source spreadsheet of grocery UPC data (Windows-style relative path).
SPREADSHEET_FILEPATH = r'.\Grocery_UPC_Database.xlsx'
spreadsheet = openpyxl.load_workbook(SPREADSHEET_FILEPATH)
# %%
# %%
# Work on the active worksheet; peek at the first row to inspect the layout.
ws = spreadsheet.active
x = next(ws.iter_rows())
x
# %%
# Notebook-style cell: inspect the value of the 4th cell of the first row.
x[3].value
# %%
# Collect (maker, product) pairs whose product name mentions a pickled food.
pickles = []
# Hoisted out of the loop: the original rebuilt this list on every row.
KEYWORDS = ('PICKLE', 'KIMCHI', 'SAUERKRAUT')
for row in ws.iter_rows():
    if len(row) == 5:
        (maker, product) = list(map(lambda x: x.value, row[3:5]))
        if product is None:
            # Empty cells yield None and would crash .upper(); skip them.
            continue
        screaming_product = product.upper()
        for keyword in KEYWORDS:
            if keyword in screaming_product:
                print(f'Added {product} ({maker})')
                pickles.append((maker, product))
                break
    else:
        # Unexpected row width — surface it rather than silently skipping.
        print(f'Row length: {len(row)}')
# %%
# %%
len(pickles)
# %%
[y for (x,y) in pickles if 'VLASIC' in y.upper()]
# %%
# now add values to data migration
# https://django.readthedocs.io/en/stable/topics/migrations.html#data-migrations
import pyperclip
pyperclip.copy('\n'.join(f'("{maker}", "{product}")' for (maker, product) in pickles))
# %%
|
import pytest
import zproc
@pytest.fixture
def ctx():
    """Provide a fresh zproc Context for each test."""
    return zproc.Context()
@pytest.fixture
def swarm(ctx):
    """Provide a worker swarm created from the per-test Context."""
    return ctx.create_swarm()
def test_regular(swarm):
    """swarm.map should match an equivalent sequential computation."""
    expected = [pow(n, 10) for n in range(10 ** 5)]
    assert swarm.map(pow, range(10 ** 5), args=[10]) == expected
def test_lazy(swarm):
    """Two lazy maps must agree with each other and with a sequential map."""
    lazy_a = swarm.map_lazy(pow, range(10 ** 5), args=[10])
    lazy_b = swarm.map_lazy(pow, range(10 ** 5), args=[10])
    expected = [pow(n, 10) for n in range(10 ** 5)]
    assert list(lazy_a) == lazy_b.as_list == expected
def test_nested_map(ctx):
    """A swarm created inside a spawned process should still map correctly."""
    @ctx.spawn
    def p1(ctx: zproc.Context):
        return ctx.create_swarm().map(pow, range(100), args=[2])
    assert p1.wait() == [pow(n, 2) for n in range(100)]
def test_remote_result(ctx):
    """A task_id produced remotely should be resolvable from the parent."""
    @ctx.spawn
    def p2(ctx: zproc.Context):
        lazy = ctx.create_swarm().map_lazy(pow, range(100), args=[2])
        return lazy.task_id
    fetched = zproc.SequenceTaskResult(ctx.server_address, p2.wait()).as_list
    assert fetched == [pow(n, 2) for n in range(100)]
|
# Простой класс модели для условной игры с главным циклом и событиями.
# Вещание на 8-Bit Tea Party.
import sys
import string
import random
class Event():
    """Random item-drop generator for the toy game model.

    Parallel class-level lists describe the item pool: `items` names,
    `chances` drop probabilities in percent, `points` health deltas
    (heals positive, poisons negative), matched by index.
    """
    items = ["Small heal", "Small poison", "Medium heal", "Medium poison", "Large heal", "Large poison"]
    chances = [45, 65, 35, 75, 20, 80]
    points = [10, -8, 20, -25, 35, -50]
    def __init__(self):
        """Print the default item table (constructor has no other state)."""
        print("Event constructor and default data for items.")
        print("{0:16s} {1:8s} {2:8s}".format("Items:", "Chance:", "Points:"))
        # zip/for instead of the original manual while-index loop.
        for item, chance, pts in zip(self.items, self.chances, self.points):
            print("{0:16s} {1:<8d} {2:<8d}".format(item, chance, pts))
    def next_event(self):
        """Roll a random item; return [name, points] on success, else [].

        The original had two byte-identical branches for even ("heal") and
        odd ("poison") indices; they are collapsed into a single roll here.
        NOTE: randint(0, 100) spans 101 values, so the effective chance is
        chances[i]/101 — kept as-is to preserve the original behavior.
        """
        event_type = random.randint(0, len(self.items) - 1)
        item_gen = list()
        roll = random.randint(0, 100)
        if roll < self.chances[event_type]:
            item_gen.append(self.items[event_type])
            item_gen.append(self.points[event_type])
        return item_gen
# Simple model with events.
print("Model with simple events.")
# One statement per line instead of the original semicolon-chained form.
counter = 0
counter_limit = 10
health = 100
events = Event()
print("\nStarting model counter.")
while counter < counter_limit:
    print("Model counter:", counter, ", health:", health)
    items = events.next_event()
    print("List of random item:", items)
    if items:
        # Last element of the drop is its health delta (heal or poison).
        health += items[-1]
    counter += 1
|
import e2e_test_framework
class APIE2ETestUserGroup(e2e_test_framework.APIE2ETest):
    """E2E test spec for the pfSense /api/v1/user/group endpoint.

    Declarative test cases per HTTP verb; the framework base class executes
    them. Each case carries an expected HTTP `status` and API `return` code
    (success cases omit both) plus an optional request `payload`.
    """
    uri = "/api/v1/user/group"
    # GET: read-only, no payload needed.
    get_tests = [
        {
            "name": "Read groups"
        }
    ]
    # POST: validation failures first, then the successful creation.
    post_tests = [
        {
            "name": "Check name requirement",
            "status": 400,
            "return": 5042,
        },
        {
            "name": "Check name minimum length constraint",
            "status": 400,
            "return": 5044,
            "payload": {
                "name": ""
            }
        },
        {
            "name": "Check name maximum length constraint",
            "status": 400,
            "return": 5044,
            "payload": {
                "name": "THISNAMEISTOOLONGFORAGROUP"
            }
        },
        # NOTE(review): exact duplicate of the previous case — likely a
        # copy-paste artifact; confirm whether it can be removed.
        {
            "name": "Check name maximum length constraint",
            "status": 400,
            "return": 5044,
            "payload": {
                "name": "THISNAMEISTOOLONGFORAGROUP"
            }
        },
        {
            "name": "Check name exists constraint",
            "status": 400,
            "return": 5049,
            "payload": {
                "name": "admins"
            }
        },
        {
            "name": "Check name charset constraint",
            "status": 400,
            "return": 5045,
            "payload": {
                "name": "!!!INVALID@@@"
            }
        },
        {
            "name": "Check scope options constraint",
            "status": 400,
            "return": 5046,
            "payload": {
                "name": "TEST_GROUP",
                "scope": "INVALID_CHOICE"
            }
        },
        {
            "name": "Check member exists constraint",
            "status": 400,
            "return": 5047,
            "payload": {
                "name": "TEST_GROUP",
                "scope": "local",
                "member": ["NOT_A_VALID_UID"]
            }
        },
        {
            "name": "Check privilege exists constraint",
            "status": 400,
            "return": 5048,
            "payload": {
                "name": "TEST_GROUP",
                "scope": "remote",
                "member": [],
                "priv": ["NOT_A_VALID_PRIVILEGE"]
            }
        },
        # Success case: later PUT/DELETE tests reference this TEST_GROUP.
        {
            "name": "Create group",
            "payload": {
                "name": "TEST_GROUP",
                "scope": "local",
                "member": [0],
                "priv": ["page-all"]
            }
        }
    ]
    # PUT: same validations plus the required `id` of the existing group.
    put_tests = [
        {
            "name": "Check ID requirement",
            "status": 400,
            "return": 5050,
        },
        {
            "name": "Check ID exists constraint",
            "status": 400,
            "return": 5051,
            "payload": {
                "id": "THIS_GROUP_DOES_NOT_EXIST"
            }
        },
        {
            "name": "Check name minimum length constraint",
            "status": 400,
            "return": 5044,
            "payload": {
                "id": "TEST_GROUP",
                "name": ""
            }
        },
        {
            "name": "Check name maximum length constraint",
            "status": 400,
            "return": 5044,
            "payload": {
                "id": "TEST_GROUP",
                "name": "THISNAMEISTOOLONGFORAGROUP"
            }
        },
        # NOTE(review): exact duplicate of the previous case — likely a
        # copy-paste artifact; confirm whether it can be removed.
        {
            "name": "Check name maximum length constraint",
            "status": 400,
            "return": 5044,
            "payload": {
                "id": "TEST_GROUP",
                "name": "THISNAMEISTOOLONGFORAGROUP"
            }
        },
        {
            "name": "Check name exists constraint",
            "status": 400,
            "return": 5049,
            "payload": {
                "id": "TEST_GROUP",
                "name": "admins"
            }
        },
        {
            "name": "Check name charset constraint",
            "status": 400,
            "return": 5045,
            "payload": {
                "id": "TEST_GROUP",
                "name": "!!!INVALID@@@"
            }
        },
        {
            "name": "Check scope options constraint",
            "status": 400,
            "return": 5046,
            "payload": {
                "id": "TEST_GROUP",
                "name": "TEST_GROUP",
                "scope": "INVALID_CHOICE"
            }
        },
        {
            "name": "Check member exists constraint",
            "status": 400,
            "return": 5047,
            "payload": {
                "id": "TEST_GROUP",
                "name": "TEST_GROUP",
                "scope": "local",
                "member": ["NOT_A_VALID_UID"]
            }
        },
        {
            "name": "Check privilege exists constraint",
            "status": 400,
            "return": 5048,
            "payload": {
                "id": "TEST_GROUP",
                "name": "TEST_GROUP",
                "scope": "remote",
                "member": [],
                "priv": ["NOT_A_VALID_PRIVILEGE"]
            }
        },
        {
            "name": "Update group",
            "payload": {
                "id": "TEST_GROUP",
                "name": "TEST_GROUP",
                "scope": "remote",
                "member": [],
                "priv": []
            }
        }
    ]
    # DELETE: ID validations, then cleanup of the group created above.
    delete_tests = [
        {
            "name": "Check ID requirement",
            "status": 400,
            "return": 5050,
        },
        {
            "name": "Check ID exists constraint",
            "status": 400,
            "return": 5051,
            "payload": {
                "id": "THIS_GROUP_DOES_NOT_EXIST"
            }
        },
        {
            "name": "Delete group",
            "payload": {
                "id": "TEST_GROUP"
            }
        }
    ]
# Instantiating the class runs the whole E2E suite (framework convention).
APIE2ETestUserGroup()
|
"""The WebSocket Feed provides real-time market data updates for orders and
trades.
Find more here: `<https://docs.exchange.coinbase.com/#websocket-feed>`_
.. module:: websock
:synopsis: WebSocket Feed
.. moduleauthor:: Alexander Simeonov <agsimeon@buffalo.edu>
"""
from json import dumps, loads
from threading import Lock, Thread
from time import sleep
from websocket import create_connection
from cbexchange.client import APIClient
class WSClient(APIClient):
  """API Client for Coinbase Exchange WebSocket Feed.
  This class starts in a disconnected state so make sure to connect before
  attempting to receive any messages. When using the 'with' statement the
  client connects and disconnects automatically.
  Once connected the client starts a thread which keeps the WebSocket alive
  using periodic pings. There will be only one keep alive thread per client
  instance. If the WebSocket connection is somehow lost, the keep alive thread
  will clean up and exit.
  The client is iterable over the messages in the feed:
  :Example:
  >>> from cbexchange.websock import WSClient
  >>> client = WSClient()
  >>> client.connect()
  >>> for message in client:
  >>>   print(message)
  The client supports the 'with' statment:
  :Example:
  >>> from cbexchange.websock import WSClient
  >>> with WSClient() as client:
  >>>   print(client.receive())
  :param str ws_uri: WebSocket URI.
  :param str ws_type: `<https://docs.exchange.coinbase.com/#subscribe>`_
  :param str ws_product_id: `<https://docs.exchange.coinbase.com/#subscribe>`_
  """
  WS_URI = 'wss://ws-feed.exchange.coinbase.com'
  WS_TYPE = 'subscribe'
  WS_PRODUCT_ID = 'BTC-USD'
  def __init__(self, ws_uri=None, ws_type=None, ws_product_id=None):
    self.WS_URI = ws_uri or self.WS_URI
    self.WS_TYPE = ws_type or self.WS_TYPE
    self.WS_PRODUCT_ID = ws_product_id or self.WS_PRODUCT_ID
    self._ws = None          # underlying websocket connection, None when disconnected
    self._thread = None      # keep-alive thread handle, None when not running
    self._lock = Lock()      # guards _thread creation and ping/teardown races
  def __iter__(self):
    return self
  def __enter__(self):
    self.connect()
    return self
  def __exit__(self, type, value, traceback):
    self.disconnect()
  def __next__(self):
    """Iterator function for Python 3.
    :returns: the next message in the sequence
    :rtype: dict
    :raises StopIteration: if the WebSocket is not connected
    """
    next = self.receive()
    if next:
      return next
    raise StopIteration
  # Iterator function for Python 2.
  next = __next__
  def _format_message(self, message):
    """Makes sure messages are Pythonic.
    :param str message: raw message
    :returns: Pythonic message
    :rtype: dict
    """
    return loads(message)
  def _keep_alive_thread(self):
    """Used exclusively as a thread which keeps the WebSocket alive."""
    while True:
      with self._lock:
        if self.connected():
          self._ws.ping()
        else:
          # Connection lost: clean up and let the thread exit so a future
          # connect() can start a fresh one.
          self.disconnect()
          self._thread = None
          return
      sleep(30)
  def connect(self):
    """Connects and subscribes to the WebSocket Feed."""
    if not self.connected():
      self._ws = create_connection(self.WS_URI)
      message = {
        'type':self.WS_TYPE,
        'product_id':self.WS_PRODUCT_ID
      }
      self._ws.send(dumps(message))
    # There will be only one keep alive thread per client instance
    with self._lock:
      if not self._thread:
        # BUG FIX: the thread handle was never stored on self._thread, so
        # the guard above never saw a live thread and every connect() call
        # spawned an additional keep-alive thread.
        self._thread = Thread(target=self._keep_alive_thread, args=[])
        self._thread.start()
  def disconnect(self):
    """Disconnects from the WebSocket Feed."""
    if self.connected():
      self._ws.close()
      self._ws = None
  def receive(self):
    """Receive the next message in the sequence.
    :returns: the next message in the sequence, None if not connected
    :rtype: dict
    """
    if self.connected():
      return self._format_message(self._ws.recv())
    return None
  def connected(self):
    """Checks if we are connected to the WebSocket Feed.
    :returns: True if connected, otherwise False
    :rtype: bool
    """
    if self._ws:
      return self._ws.connected
    return False
|
# -*- coding: utf-8 -*-
# pragma pylint: disable=unused-argument, no-self-use
#
# (c) Copyright IBM Corp. 2010, 2018. All Rights Reserved.
#
"""
DataPreparation
---------------
Machine learning relies on good dataset. DataPreparation prepare
the dataset before it is used for training a machine learning
model.
Data preparation includes the following steps:
1. Clean up the dataset. Samples with blank values shall be removed
according to recommendation from ML books
2. String/Boolean values will be converted into integers
3. Integer/flow values will be normalized, so samples can span
out for easier identification/categorization
4. Upsampling is used for imbalanced dataset
"""
import pandas as pds
from sklearn.preprocessing import MinMaxScaler
import sys
class DataPreparation(object):
    """Static helpers to clean, encode, normalize, and re-balance a dataset
    before it is used to train a machine learning model.

    Convention used throughout: the LAST column of a training dataframe is
    the prediction/label column.
    """
    @staticmethod
    def drop_samples_with_missing_value(df):
        """
        [Optional step in pre-processing samples]
        Drop rows with any missing value, then any remaining columns with
        missing values.
        Fixes two bugs in the original: the second call used a misspelled
        keyword (``asix``) which raised TypeError, and the cleaned frame
        was never returned.
        :param df: input dataframe to process
        :return: dataframe with incomplete rows/columns removed
        """
        df = df.dropna(axis=0)
        df = df.dropna(axis=1)
        return df
    @staticmethod
    def normalize_samples(samples):
        """
        Feature scaling is crucial for most learning models.
        :param samples: samples to normalize
        :return: samples scaled to [0, 1] per feature via MinMaxScaler
        """
        mms = MinMaxScaler()
        samples = mms.fit_transform(samples)
        return samples
    @staticmethod
    def one_hot_encoding(df, features):
        """
        Use pandas function to do one hot encoding. Basically string features need to
        be converted/encoded into categorical integers.
        https://pandas.pydata.org/pandas-docs/stable/generated/pandas.get_dummies.html
        :param df: input dataframe of samples to handle
        :param features: list of features to be encoded using one hot encoding
        :return: encoded dataframe of samples
        """
        return pds.get_dummies(df[features])
    @staticmethod
    def upsample_minorities(df_training, imbalance_upsampling=None):
        """
        Upsample minority classes (with replacement) until every class
        matches the majority class count. The label is taken from the last
        column. Note upsampling shall be done for the training set ONLY.
        :param df_training: dataframe for training
        :param imbalance_upsampling: None/False for no-op, True to upsample
            every minority class to the majority count. (A dict value is
            documented by callers but not implemented; it falls through to
            the no-op return.)
        :return: upsampled dataframe
        """
        if imbalance_upsampling is None or not imbalance_upsampling:
            # nothing to do
            return df_training
        if imbalance_upsampling is True:
            # per-class sample counts, keyed by class value
            counts = df_training.iloc[:, -1].value_counts()
            # Materialize the (class_value, count) pairs ONCE. The original
            # iterated the same generator twice; the second loop saw an
            # exhausted iterator and never upsampled anything.
            if sys.version_info[0] < 3:
                items = list(counts.iteritems())
            else:
                items = list(counts.items())
            # count of the majority class
            maj_count = max((count for _, count in items), default=0)
            dataf = pds.DataFrame()
            for class_value, count in items:
                # all samples belonging to this class
                class_df = df_training[df_training.iloc[:, -1] == class_value]
                if count < maj_count:
                    # upsample with replacement to match the majority
                    class_df = class_df.sample(maj_count,
                                               replace=True)
                dataf = pds.concat([dataf, class_df], axis=0)
            return dataf
        return df_training
    @staticmethod
    def remove_samples_with_values(data_frame, prediction, value_list):
        """
        Clean up the data_frame. Some of the values of the prediction can
        confuse the ML model. For example, unknowns.
        Customer can choose to remove those samples.
        :param data_frame: dataframe to clean up
        :param prediction: name of the prediction column
        :param value_list: prediction values to remove
        :return: cleaned up dataframe
        """
        dataf = data_frame
        for value in value_list:
            dataf = dataf[dataf[prediction] != value]
        # Need to re-arrange the index after dropping rows
        dataf.index = range(len(dataf))
        return dataf
|
# Given n pairs of parentheses, write a function to generate
# all combinations of well-formed parentheses.
#
# For example, given n = 3, a solution set is:
#
# [
# "((()))",
# "(()())",
# "(())()",
# "()(())",
# "()()()"
# ]
class Solution:
    """Generate all well-formed combinations of n pairs of parentheses."""
    def generateParenthesis(self, n):
        """Return every balanced string of n '(' / ')' pairs.

        Explicit-stack depth-first search instead of recursion: each stack
        entry is (partial_string, open_count, close_count); a ')' may be
        added only while close_count < open_count, a '(' only while
        open_count < n.
        """
        combos = []
        stack = [("", 0, 0)]
        while stack:
            partial, opened, closed = stack.pop()
            if len(partial) == 2 * n:
                combos.append(partial)
                continue
            if closed < opened:
                stack.append((partial + ")", opened, closed + 1))
            if opened < n:
                stack.append((partial + "(", opened + 1, closed))
        return combos
|
from django.contrib import admin
from CryptoMessageApi.models import Message, Conversation, Device, User
# Register your models here.
# Expose the core chat models in the Django admin with default ModelAdmins.
# NOTE(review): `User` is imported but never registered — confirm whether it
# should be, or drop the import.
admin.site.register(Device)
admin.site.register(Message)
admin.site.register(Conversation)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.